/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

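/* Completion handler shared by all synchronous requests: record the
 * result and wake up the task waiting on req_wait_q.
 */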
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

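/* Send a single HCI command and block until the controller answers with
 * the requested event (or with Command Complete when @event is 0).
 * Returns the response skb on success, or an ERR_PTR() on failure or
 * timeout.
 */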
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

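/* Convenience wrapper around __hci_cmd_sync_ev() for the common case of
 * waiting for the Command Complete event. A minimal usage sketch:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */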
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

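/* Serialized variant of __hci_req_sync() for use outside the init path;
 * fails with -ENETDOWN unless the device is up.
 */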
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

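/* Stage one of the controller init: an optional reset followed by the
 * transport-specific basic setup (BR/EDR or AMP).
 */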
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

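/* Run the staged controller initialization, one synchronous HCI request
 * per stage.
 */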
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

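/* Discovery counts as active while devices are being found
 * (DISCOVERY_FINDING) or their names resolved (DISCOVERY_RESOLVING).
 */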
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

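/* Add a new inquiry cache entry for the reported device or refresh an
 * existing one. Returns false when the remote name is still unknown (or
 * the entry could not be allocated), true otherwise.
 */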
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

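/* Helper for the HCIINQUIRY ioctl: run an inquiry if the cache is stale
 * (or a flush was requested) and copy the cached results to user space.
 */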
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

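/* Build the LE advertising data (flags, TX power and local name) into
 * @ptr and return the number of bytes used.
 */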
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

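/* Bring the device up: run the driver's open() and optional setup()
 * callbacks and, unless the device is marked raw or bound to a user
 * channel, the full HCI init sequence.
 */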
static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Check for rfkill but allow the HCI setup stage to proceed
	 * (which in itself doesn't cause any RF activity).
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

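/* Tear the device down: flush pending work, drain the queues, optionally
 * reset the controller and call the driver's close() callback.
 */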
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
1443
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001444 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001445 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001447 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448
1449 if (hdev->flush)
1450 hdev->flush(hdev);
1451
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001452 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001453 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
1455 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001456 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001457
1458done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 hci_req_unlock(hdev);
1460 hci_dev_put(hdev);
1461 return ret;
1462}
1463
1464int hci_dev_reset_stat(__u16 dev)
1465{
1466 struct hci_dev *hdev;
1467 int ret = 0;
1468
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001469 hdev = hci_dev_get(dev);
1470 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001471 return -ENODEV;
1472
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001473 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1474 ret = -EBUSY;
1475 goto done;
1476 }
1477
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1479
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001480done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 return ret;
1483}
1484
1485int hci_dev_cmd(unsigned int cmd, void __user *arg)
1486{
1487 struct hci_dev *hdev;
1488 struct hci_dev_req dr;
1489 int err = 0;
1490
1491 if (copy_from_user(&dr, arg, sizeof(dr)))
1492 return -EFAULT;
1493
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001494 hdev = hci_dev_get(dr.dev_id);
1495 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001496 return -ENODEV;
1497
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001498 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1499 err = -EBUSY;
1500 goto done;
1501 }
1502
Johan Hedberg56f87902013-10-02 13:43:13 +03001503 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1504 err = -EOPNOTSUPP;
1505 goto done;
1506 }
1507
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 switch (cmd) {
1509 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001510 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1511 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 break;
1513
1514 case HCISETENCRYPT:
1515 if (!lmp_encrypt_capable(hdev)) {
1516 err = -EOPNOTSUPP;
1517 break;
1518 }
1519
1520 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1521 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001522 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1523 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524 if (err)
1525 break;
1526 }
1527
Johan Hedberg01178cd2013-03-05 20:37:41 +02001528 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1529 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530 break;
1531
1532 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001533 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1534 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 break;
1536
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001537 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001538 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1539 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001540 break;
1541
1542 case HCISETLINKMODE:
1543 hdev->link_mode = ((__u16) dr.dev_opt) &
1544 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1545 break;
1546
Linus Torvalds1da177e2005-04-16 15:20:36 -07001547 case HCISETPTYPE:
1548 hdev->pkt_type = (__u16) dr.dev_opt;
1549 break;
1550
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001552 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1553 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 break;
1555
1556 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001557 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1558 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559 break;
1560
1561 default:
1562 err = -EINVAL;
1563 break;
1564 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001565
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001566done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 hci_dev_put(hdev);
1568 return err;
1569}
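/* Sketch of the HCISETACLMTU encoding handled above (an assumption
 * drawn from the pointer arithmetic, valid for little-endian hosts):
 * dev_opt carries the MTU in its upper 16 bits and the packet count in
 * its lower 16 bits. A hypothetical user-space caller:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = (acl_mtu << 16) | (acl_pkts & 0xffff);
 *	if (ioctl(dd, HCISETACLMTU, &dr) < 0)
 *		perror("HCISETACLMTU");
 */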
1570
1571int hci_get_dev_list(void __user *arg)
1572{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001573 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 struct hci_dev_list_req *dl;
1575 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001576 int n = 0, size, err;
1577 __u16 dev_num;
1578
1579 if (get_user(dev_num, (__u16 __user *) arg))
1580 return -EFAULT;
1581
1582 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1583 return -EINVAL;
1584
1585 size = sizeof(*dl) + dev_num * sizeof(*dr);
1586
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001587 dl = kzalloc(size, GFP_KERNEL);
1588 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001589 return -ENOMEM;
1590
1591 dr = dl->dev_req;
1592
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001593 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001594 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001595 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001596 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001597
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001598 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1599 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001600
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 (dr + n)->dev_id = hdev->id;
1602 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001603
Linus Torvalds1da177e2005-04-16 15:20:36 -07001604 if (++n >= dev_num)
1605 break;
1606 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001607 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608
1609 dl->dev_num = n;
1610 size = sizeof(*dl) + n * sizeof(*dr);
1611
1612 err = copy_to_user(arg, dl, size);
1613 kfree(dl);
1614
1615 return err ? -EFAULT : 0;
1616}
1617
1618int hci_get_dev_info(void __user *arg)
1619{
1620 struct hci_dev *hdev;
1621 struct hci_dev_info di;
1622 int err = 0;
1623
1624 if (copy_from_user(&di, arg, sizeof(di)))
1625 return -EFAULT;
1626
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001627 hdev = hci_dev_get(di.dev_id);
1628 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001629 return -ENODEV;
1630
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001631 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001632 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001633
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001634 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1635 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001636
Linus Torvalds1da177e2005-04-16 15:20:36 -07001637 strcpy(di.name, hdev->name);
1638 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001639 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001640 di.flags = hdev->flags;
1641 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001642 if (lmp_bredr_capable(hdev)) {
1643 di.acl_mtu = hdev->acl_mtu;
1644 di.acl_pkts = hdev->acl_pkts;
1645 di.sco_mtu = hdev->sco_mtu;
1646 di.sco_pkts = hdev->sco_pkts;
1647 } else {
1648 di.acl_mtu = hdev->le_mtu;
1649 di.acl_pkts = hdev->le_pkts;
1650 di.sco_mtu = 0;
1651 di.sco_pkts = 0;
1652 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 di.link_policy = hdev->link_policy;
1654 di.link_mode = hdev->link_mode;
1655
1656 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1657 memcpy(&di.features, &hdev->features, sizeof(di.features));
1658
1659 if (copy_to_user(arg, &di, sizeof(di)))
1660 err = -EFAULT;
1661
1662 hci_dev_put(hdev);
1663
1664 return err;
1665}
1666
1667/* ---- Interface to HCI drivers ---- */
1668
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001669static int hci_rfkill_set_block(void *data, bool blocked)
1670{
1671 struct hci_dev *hdev = data;
1672
1673 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1674
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001675 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1676 return -EBUSY;
1677
Johan Hedberg5e130362013-09-13 08:58:17 +03001678 if (blocked) {
1679 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001680 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1681 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001682 } else {
1683 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001684 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001685
1686 return 0;
1687}
1688
1689static const struct rfkill_ops hci_rfkill_ops = {
1690 .set_block = hci_rfkill_set_block,
1691};
1692
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001693static void hci_power_on(struct work_struct *work)
1694{
1695 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001696 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001697
1698 BT_DBG("%s", hdev->name);
1699
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001700 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001701 if (err < 0) {
1702 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001703 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001704 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001705
Johan Hedbergbf543032013-09-13 08:58:18 +03001706 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1707 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1708 hci_dev_do_close(hdev);
1709 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02001710 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1711 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03001712 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001713
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001714 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001715 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001716}
1717
1718static void hci_power_off(struct work_struct *work)
1719{
Johan Hedberg32435532011-11-07 22:16:04 +02001720 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001721 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001722
1723 BT_DBG("%s", hdev->name);
1724
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001725 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001726}
1727
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001728static void hci_discov_off(struct work_struct *work)
1729{
1730 struct hci_dev *hdev;
1731 u8 scan = SCAN_PAGE;
1732
1733 hdev = container_of(work, struct hci_dev, discov_off.work);
1734
1735 BT_DBG("%s", hdev->name);
1736
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001737 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001738
1739 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1740
1741 hdev->discov_timeout = 0;
1742
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001743 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001744}
1745
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001746int hci_uuids_clear(struct hci_dev *hdev)
1747{
Johan Hedberg48210022013-01-27 00:31:28 +02001748 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001749
Johan Hedberg48210022013-01-27 00:31:28 +02001750 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1751 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001752 kfree(uuid);
1753 }
1754
1755 return 0;
1756}
1757
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001758int hci_link_keys_clear(struct hci_dev *hdev)
1759{
1760 struct list_head *p, *n;
1761
1762 list_for_each_safe(p, n, &hdev->link_keys) {
1763 struct link_key *key;
1764
1765 key = list_entry(p, struct link_key, list);
1766
1767 list_del(p);
1768 kfree(key);
1769 }
1770
1771 return 0;
1772}
1773
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001774int hci_smp_ltks_clear(struct hci_dev *hdev)
1775{
1776 struct smp_ltk *k, *tmp;
1777
1778 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1779 list_del(&k->list);
1780 kfree(k);
1781 }
1782
1783 return 0;
1784}
1785
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001786struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1787{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001788 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001789
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001790 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001791 if (bacmp(bdaddr, &k->bdaddr) == 0)
1792 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001793
1794 return NULL;
1795}
1796
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301797static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001798 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001799{
1800 /* Legacy key */
1801 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301802 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001803
1804 /* Debug keys are insecure so don't store them persistently */
1805 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301806 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001807
1808 /* Changed combination key and there's no previous one */
1809 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301810 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001811
1812 /* Security mode 3 case */
1813 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301814 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001815
1816	/* Neither the local nor the remote side requested no-bonding */
1817 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301818 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001819
1820 /* Local side had dedicated bonding as requirement */
1821 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301822 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001823
1824 /* Remote side had dedicated bonding as requirement */
1825 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301826 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001827
1828 /* If none of the above criteria match, then don't store the key
1829 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301830 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001831}
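/* Worked example (hypothetical values, for illustration only): an SSP
 * pairing where both sides asked for dedicated bonding yields a key
 * that is kept, while a debug combination key is always dropped:
 *
 *	struct hci_conn c = { .auth_type = 0x02, .remote_auth = 0x02 };
 *
 *	hci_persistent_key(hdev, &c, HCI_LK_UNAUTH_COMBINATION, 0xff)
 *		evaluates to true (neither side requested no-bonding);
 *	hci_persistent_key(hdev, NULL, HCI_LK_DEBUG_COMBINATION, 0xff)
 *		evaluates to false (debug keys are never stored).
 */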
1832
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001833struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001834{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001835 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001836
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001837 list_for_each_entry(k, &hdev->long_term_keys, list) {
1838 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001839 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001840 continue;
1841
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001842 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001843 }
1844
1845 return NULL;
1846}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001847
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001848struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001849 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001850{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001851 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001852
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001853 list_for_each_entry(k, &hdev->long_term_keys, list)
1854 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001855 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001856 return k;
1857
1858 return NULL;
1859}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001860
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001861int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001862 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001863{
1864 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301865 u8 old_key_type;
1866 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001867
1868 old_key = hci_find_link_key(hdev, bdaddr);
1869 if (old_key) {
1870 old_key_type = old_key->type;
1871 key = old_key;
1872 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001873 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001874 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1875 if (!key)
1876 return -ENOMEM;
1877 list_add(&key->list, &hdev->link_keys);
1878 }
1879
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001880 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001881
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001882 /* Some buggy controller combinations generate a changed
1883 * combination key for legacy pairing even when there's no
1884 * previous key */
1885 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001886 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001887 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001888 if (conn)
1889 conn->key_type = type;
1890 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001891
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001892 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001893 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001894 key->pin_len = pin_len;
1895
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001896 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001897 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001898 else
1899 key->type = type;
1900
Johan Hedberg4df378a2011-04-28 11:29:03 -07001901 if (!new_key)
1902 return 0;
1903
1904 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1905
Johan Hedberg744cf192011-11-08 20:40:14 +02001906 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001907
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301908 if (conn)
1909 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001910
1911 return 0;
1912}
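/* Caller sketch (an assumption; the real caller lives in hci_event.c,
 * which is outside this file): the Link Key Notification event handler
 * is expected to store keys through this helper, roughly:
 *
 *	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
 *			 ev->key_type, conn ? conn->pin_length : 0);
 */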
1913
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001914int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001915 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001916 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001917{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001918 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001919
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001920 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1921 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001922
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001923 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1924 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001925 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001926 else {
1927 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001928 if (!key)
1929 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001930 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001931 }
1932
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001933 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001934 key->bdaddr_type = addr_type;
1935 memcpy(key->val, tk, sizeof(key->val));
1936 key->authenticated = authenticated;
1937 key->ediv = ediv;
1938 key->enc_size = enc_size;
1939 key->type = type;
1940 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001941
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001942 if (!new_key)
1943 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001944
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001945 if (type & HCI_SMP_LTK)
1946 mgmt_new_ltk(hdev, key, 1);
1947
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001948 return 0;
1949}
1950
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001951int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1952{
1953 struct link_key *key;
1954
1955 key = hci_find_link_key(hdev, bdaddr);
1956 if (!key)
1957 return -ENOENT;
1958
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001959 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001960
1961 list_del(&key->list);
1962 kfree(key);
1963
1964 return 0;
1965}
1966
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001967int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1968{
1969 struct smp_ltk *k, *tmp;
1970
1971 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1972 if (bacmp(bdaddr, &k->bdaddr))
1973 continue;
1974
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001975 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001976
1977 list_del(&k->list);
1978 kfree(k);
1979 }
1980
1981 return 0;
1982}
1983
Ville Tervo6bd32322011-02-16 16:32:41 +02001984/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001985static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001986{
1987 struct hci_dev *hdev = (void *) arg;
1988
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001989 if (hdev->sent_cmd) {
1990 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1991 u16 opcode = __le16_to_cpu(sent->opcode);
1992
1993 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1994 } else {
1995 BT_ERR("%s command tx timeout", hdev->name);
1996 }
1997
Ville Tervo6bd32322011-02-16 16:32:41 +02001998 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001999 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002000}
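/* Sketch of how this timer is armed (an assumption about code outside
 * this excerpt): the command work handler restarts it each time a
 * command is handed to the driver, so a controller that never answers
 * trips the handler above after HCI_CMD_TIMEOUT, roughly:
 *
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 */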
2001
Szymon Janc2763eda2011-03-22 13:12:22 +01002002struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002003 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002004{
2005 struct oob_data *data;
2006
2007 list_for_each_entry(data, &hdev->remote_oob_data, list)
2008 if (bacmp(bdaddr, &data->bdaddr) == 0)
2009 return data;
2010
2011 return NULL;
2012}
2013
2014int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2015{
2016 struct oob_data *data;
2017
2018 data = hci_find_remote_oob_data(hdev, bdaddr);
2019 if (!data)
2020 return -ENOENT;
2021
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002022 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002023
2024 list_del(&data->list);
2025 kfree(data);
2026
2027 return 0;
2028}
2029
2030int hci_remote_oob_data_clear(struct hci_dev *hdev)
2031{
2032 struct oob_data *data, *n;
2033
2034 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2035 list_del(&data->list);
2036 kfree(data);
2037 }
2038
2039 return 0;
2040}
2041
2042int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002043 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002044{
2045 struct oob_data *data;
2046
2047 data = hci_find_remote_oob_data(hdev, bdaddr);
2048
2049 if (!data) {
2050 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2051 if (!data)
2052 return -ENOMEM;
2053
2054 bacpy(&data->bdaddr, bdaddr);
2055 list_add(&data->list, &hdev->remote_oob_data);
2056 }
2057
2058 memcpy(data->hash, hash, sizeof(data->hash));
2059 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2060
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002061 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002062
2063 return 0;
2064}
2065
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002066struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002067{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002068 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002069
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002070 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002071 if (bacmp(bdaddr, &b->bdaddr) == 0)
2072 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002073
2074 return NULL;
2075}
2076
2077int hci_blacklist_clear(struct hci_dev *hdev)
2078{
2079 struct list_head *p, *n;
2080
2081 list_for_each_safe(p, n, &hdev->blacklist) {
2082 struct bdaddr_list *b;
2083
2084 b = list_entry(p, struct bdaddr_list, list);
2085
2086 list_del(p);
2087 kfree(b);
2088 }
2089
2090 return 0;
2091}
2092
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002093int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002094{
2095 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002096
2097 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2098 return -EBADF;
2099
Antti Julku5e762442011-08-25 16:48:02 +03002100 if (hci_blacklist_lookup(hdev, bdaddr))
2101 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002102
2103 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002104 if (!entry)
2105 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002106
2107 bacpy(&entry->bdaddr, bdaddr);
2108
2109 list_add(&entry->list, &hdev->blacklist);
2110
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002111 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002112}
2113
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002114int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002115{
2116 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002117
Szymon Janc1ec918c2011-11-16 09:32:21 +01002118 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002119 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002120
2121 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002122 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002123 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002124
2125 list_del(&entry->list);
2126 kfree(entry);
2127
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002128 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002129}
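/* Usage sketch (hypothetical caller; the mgmt interface normally does
 * this while holding the device lock):
 *
 *	hci_dev_lock(hdev);
 *	err = hci_blacklist_add(hdev, &bdaddr, BDADDR_BREDR);
 *	hci_dev_unlock(hdev);
 */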
2130
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002131static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002132{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002133 if (status) {
2134 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002135
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002136 hci_dev_lock(hdev);
2137 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2138 hci_dev_unlock(hdev);
2139 return;
2140 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002141}
2142
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002143static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002144{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002145 /* General inquiry access code (GIAC) */
2146 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2147 struct hci_request req;
2148 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002149 int err;
2150
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002151 if (status) {
2152 BT_ERR("Failed to disable LE scanning: status %d", status);
2153 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002154 }
2155
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002156 switch (hdev->discovery.type) {
2157 case DISCOV_TYPE_LE:
2158 hci_dev_lock(hdev);
2159 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2160 hci_dev_unlock(hdev);
2161 break;
2162
2163 case DISCOV_TYPE_INTERLEAVED:
2164 hci_req_init(&req, hdev);
2165
2166 memset(&cp, 0, sizeof(cp));
2167 memcpy(&cp.lap, lap, sizeof(cp.lap));
2168 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2169 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2170
2171 hci_dev_lock(hdev);
2172
2173 hci_inquiry_cache_flush(hdev);
2174
2175 err = hci_req_run(&req, inquiry_complete);
2176 if (err) {
2177 BT_ERR("Inquiry request failed: err %d", err);
2178 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2179 }
2180
2181 hci_dev_unlock(hdev);
2182 break;
2183 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002184}
2185
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002186static void le_scan_disable_work(struct work_struct *work)
2187{
2188 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002189 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002190 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002191 struct hci_request req;
2192 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002193
2194 BT_DBG("%s", hdev->name);
2195
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002196 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002197
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002198 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002199 cp.enable = LE_SCAN_DISABLE;
2200 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002201
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002202 err = hci_req_run(&req, le_scan_disable_work_complete);
2203 if (err)
2204 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002205}
2206
David Herrmann9be0dab2012-04-22 14:39:57 +02002207/* Alloc HCI device */
2208struct hci_dev *hci_alloc_dev(void)
2209{
2210 struct hci_dev *hdev;
2211
2212 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2213 if (!hdev)
2214 return NULL;
2215
David Herrmannb1b813d2012-04-22 14:39:58 +02002216 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2217 hdev->esco_type = (ESCO_HV1);
2218 hdev->link_mode = (HCI_LM_ACCEPT);
2219 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002220 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2221 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002222
David Herrmannb1b813d2012-04-22 14:39:58 +02002223 hdev->sniff_max_interval = 800;
2224 hdev->sniff_min_interval = 80;
2225
2226 mutex_init(&hdev->lock);
2227 mutex_init(&hdev->req_lock);
2228
2229 INIT_LIST_HEAD(&hdev->mgmt_pending);
2230 INIT_LIST_HEAD(&hdev->blacklist);
2231 INIT_LIST_HEAD(&hdev->uuids);
2232 INIT_LIST_HEAD(&hdev->link_keys);
2233 INIT_LIST_HEAD(&hdev->long_term_keys);
2234 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002235 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002236
2237 INIT_WORK(&hdev->rx_work, hci_rx_work);
2238 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2239 INIT_WORK(&hdev->tx_work, hci_tx_work);
2240 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002241
David Herrmannb1b813d2012-04-22 14:39:58 +02002242 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2243 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2244 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2245
David Herrmannb1b813d2012-04-22 14:39:58 +02002246 skb_queue_head_init(&hdev->rx_q);
2247 skb_queue_head_init(&hdev->cmd_q);
2248 skb_queue_head_init(&hdev->raw_q);
2249
2250 init_waitqueue_head(&hdev->req_wait_q);
2251
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002252 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002253
David Herrmannb1b813d2012-04-22 14:39:58 +02002254 hci_init_sysfs(hdev);
2255 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002256
2257 return hdev;
2258}
2259EXPORT_SYMBOL(hci_alloc_dev);
2260
2261/* Free HCI device */
2262void hci_free_dev(struct hci_dev *hdev)
2263{
David Herrmann9be0dab2012-04-22 14:39:57 +02002264 /* will free via device release */
2265 put_device(&hdev->dev);
2266}
2267EXPORT_SYMBOL(hci_free_dev);
2268
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269/* Register HCI device */
2270int hci_register_dev(struct hci_dev *hdev)
2271{
David Herrmannb1b813d2012-04-22 14:39:58 +02002272 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002273
David Herrmann010666a2012-01-07 15:47:07 +01002274 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275 return -EINVAL;
2276
Mat Martineau08add512011-11-02 16:18:36 -07002277 /* Do not allow HCI_AMP devices to register at index 0,
2278 * so the index can be used as the AMP controller ID.
2279 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002280 switch (hdev->dev_type) {
2281 case HCI_BREDR:
2282 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2283 break;
2284 case HCI_AMP:
2285 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2286 break;
2287 default:
2288 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002290
Sasha Levin3df92b32012-05-27 22:36:56 +02002291 if (id < 0)
2292 return id;
2293
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 sprintf(hdev->name, "hci%d", id);
2295 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002296
2297 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2298
Kees Cookd8537542013-07-03 15:04:57 -07002299 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2300 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002301 if (!hdev->workqueue) {
2302 error = -ENOMEM;
2303 goto err;
2304 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002305
Kees Cookd8537542013-07-03 15:04:57 -07002306 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2307 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002308 if (!hdev->req_workqueue) {
2309 destroy_workqueue(hdev->workqueue);
2310 error = -ENOMEM;
2311 goto err;
2312 }
2313
David Herrmann33ca9542011-10-08 14:58:49 +02002314 error = hci_add_sysfs(hdev);
2315 if (error < 0)
2316 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002318 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002319 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2320 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002321 if (hdev->rfkill) {
2322 if (rfkill_register(hdev->rfkill) < 0) {
2323 rfkill_destroy(hdev->rfkill);
2324 hdev->rfkill = NULL;
2325 }
2326 }
2327
Johan Hedberg5e130362013-09-13 08:58:17 +03002328 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2329 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2330
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002331 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002332
Johan Hedberg56f87902013-10-02 13:43:13 +03002333 if (hdev->dev_type != HCI_AMP) {
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002334 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Johan Hedberg56f87902013-10-02 13:43:13 +03002335 /* Assume BR/EDR support until proven otherwise (such as
2336		 * through reading supported features during init).
2337 */
2338 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2339 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002340
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002341 write_lock(&hci_dev_list_lock);
2342 list_add(&hdev->list, &hci_dev_list);
2343 write_unlock(&hci_dev_list_lock);
2344
Linus Torvalds1da177e2005-04-16 15:20:36 -07002345 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002346 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002347
Johan Hedberg19202572013-01-14 22:33:51 +02002348 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002349
Linus Torvalds1da177e2005-04-16 15:20:36 -07002350 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002351
David Herrmann33ca9542011-10-08 14:58:49 +02002352err_wqueue:
2353 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002354 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002355err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002356 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002357
David Herrmann33ca9542011-10-08 14:58:49 +02002358 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002359}
2360EXPORT_SYMBOL(hci_register_dev);
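/* Driver-side sketch (hypothetical, heavily abbreviated): a transport
 * driver pairs hci_alloc_dev()/hci_register_dev() on probe and
 * hci_unregister_dev()/hci_free_dev() on remove; my_open, my_close and
 * my_send stand in for the driver's real callbacks:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */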
2361
2362/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002363void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002364{
Sasha Levin3df92b32012-05-27 22:36:56 +02002365 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002366
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002367 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002368
Johan Hovold94324962012-03-15 14:48:41 +01002369 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2370
Sasha Levin3df92b32012-05-27 22:36:56 +02002371 id = hdev->id;
2372
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002373 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002375 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376
2377 hci_dev_do_close(hdev);
2378
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302379 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002380 kfree_skb(hdev->reassembly[i]);
2381
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002382 cancel_work_sync(&hdev->power_on);
2383
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002384 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002385 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002386 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002387 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002388 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002389 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002390
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002391 /* mgmt_index_removed should take care of emptying the
2392 * pending list */
2393 BUG_ON(!list_empty(&hdev->mgmt_pending));
2394
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 hci_notify(hdev, HCI_DEV_UNREG);
2396
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002397 if (hdev->rfkill) {
2398 rfkill_unregister(hdev->rfkill);
2399 rfkill_destroy(hdev->rfkill);
2400 }
2401
David Herrmannce242972011-10-08 14:58:48 +02002402 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002403
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002404 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002405 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002406
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002407 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002408 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002409 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002410 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002411 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002412 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002413 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002414
David Herrmanndc946bd2012-01-07 15:47:24 +01002415 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002416
2417 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002418}
2419EXPORT_SYMBOL(hci_unregister_dev);
2420
2421/* Suspend HCI device */
2422int hci_suspend_dev(struct hci_dev *hdev)
2423{
2424 hci_notify(hdev, HCI_DEV_SUSPEND);
2425 return 0;
2426}
2427EXPORT_SYMBOL(hci_suspend_dev);
2428
2429/* Resume HCI device */
2430int hci_resume_dev(struct hci_dev *hdev)
2431{
2432 hci_notify(hdev, HCI_DEV_RESUME);
2433 return 0;
2434}
2435EXPORT_SYMBOL(hci_resume_dev);
2436
Marcel Holtmann76bca882009-11-18 00:40:39 +01002437/* Receive frame from HCI drivers */
2438int hci_recv_frame(struct sk_buff *skb)
2439{
2440 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2441 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002442 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002443 kfree_skb(skb);
2444 return -ENXIO;
2445 }
2446
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002447 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002448 bt_cb(skb)->incoming = 1;
2449
2450 /* Time stamp */
2451 __net_timestamp(skb);
2452
Marcel Holtmann76bca882009-11-18 00:40:39 +01002453 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002454 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002455
Marcel Holtmann76bca882009-11-18 00:40:39 +01002456 return 0;
2457}
2458EXPORT_SYMBOL(hci_recv_frame);
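/* Driver RX sketch (hypothetical): in this kernel version the skb must
 * carry the owning hci_dev in skb->dev and a packet type in its
 * control block before being handed to hci_recv_frame():
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, count), buf, count);
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *
 *	return hci_recv_frame(skb);
 */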
2459
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302460static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002461 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302462{
2463 int len = 0;
2464 int hlen = 0;
2465 int remain = count;
2466 struct sk_buff *skb;
2467 struct bt_skb_cb *scb;
2468
2469 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002470 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302471 return -EILSEQ;
2472
2473 skb = hdev->reassembly[index];
2474
2475 if (!skb) {
2476 switch (type) {
2477 case HCI_ACLDATA_PKT:
2478 len = HCI_MAX_FRAME_SIZE;
2479 hlen = HCI_ACL_HDR_SIZE;
2480 break;
2481 case HCI_EVENT_PKT:
2482 len = HCI_MAX_EVENT_SIZE;
2483 hlen = HCI_EVENT_HDR_SIZE;
2484 break;
2485 case HCI_SCODATA_PKT:
2486 len = HCI_MAX_SCO_SIZE;
2487 hlen = HCI_SCO_HDR_SIZE;
2488 break;
2489 }
2490
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002491 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302492 if (!skb)
2493 return -ENOMEM;
2494
2495 scb = (void *) skb->cb;
2496 scb->expect = hlen;
2497 scb->pkt_type = type;
2498
2499 skb->dev = (void *) hdev;
2500 hdev->reassembly[index] = skb;
2501 }
2502
2503 while (count) {
2504 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002505 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302506
2507 memcpy(skb_put(skb, len), data, len);
2508
2509 count -= len;
2510 data += len;
2511 scb->expect -= len;
2512 remain = count;
2513
2514 switch (type) {
2515 case HCI_EVENT_PKT:
2516 if (skb->len == HCI_EVENT_HDR_SIZE) {
2517 struct hci_event_hdr *h = hci_event_hdr(skb);
2518 scb->expect = h->plen;
2519
2520 if (skb_tailroom(skb) < scb->expect) {
2521 kfree_skb(skb);
2522 hdev->reassembly[index] = NULL;
2523 return -ENOMEM;
2524 }
2525 }
2526 break;
2527
2528 case HCI_ACLDATA_PKT:
2529 if (skb->len == HCI_ACL_HDR_SIZE) {
2530 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2531 scb->expect = __le16_to_cpu(h->dlen);
2532
2533 if (skb_tailroom(skb) < scb->expect) {
2534 kfree_skb(skb);
2535 hdev->reassembly[index] = NULL;
2536 return -ENOMEM;
2537 }
2538 }
2539 break;
2540
2541 case HCI_SCODATA_PKT:
2542 if (skb->len == HCI_SCO_HDR_SIZE) {
2543 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2544 scb->expect = h->dlen;
2545
2546 if (skb_tailroom(skb) < scb->expect) {
2547 kfree_skb(skb);
2548 hdev->reassembly[index] = NULL;
2549 return -ENOMEM;
2550 }
2551 }
2552 break;
2553 }
2554
2555 if (scb->expect == 0) {
2556 /* Complete frame */
2557
2558 bt_cb(skb)->pkt_type = type;
2559 hci_recv_frame(skb);
2560
2561 hdev->reassembly[index] = NULL;
2562 return remain;
2563 }
2564 }
2565
2566 return remain;
2567}
2568
Marcel Holtmannef222012007-07-11 06:42:04 +02002569int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2570{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302571 int rem = 0;
2572
Marcel Holtmannef222012007-07-11 06:42:04 +02002573 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2574 return -EILSEQ;
2575
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002576 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002577 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302578 if (rem < 0)
2579 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002580
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302581 data += (count - rem);
2582 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002583 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002584
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302585 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002586}
2587EXPORT_SYMBOL(hci_recv_fragment);
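/* Usage sketch (hypothetical): a driver that knows the packet type but
 * receives the data in arbitrary chunks can feed each chunk here and
 * let the reassembly machinery above find the frame boundaries:
 *
 *	rem = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, count);
 *	if (rem < 0)
 *		BT_ERR("reassembly failed: %d", rem);
 */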
2588
Suraj Sumangala99811512010-07-14 13:02:19 +05302589#define STREAM_REASSEMBLY 0
2590
2591int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2592{
2593 int type;
2594 int rem = 0;
2595
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002596 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302597 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2598
2599 if (!skb) {
2600 struct { char type; } *pkt;
2601
2602 /* Start of the frame */
2603 pkt = data;
2604 type = pkt->type;
2605
2606 data++;
2607 count--;
2608 } else
2609 type = bt_cb(skb)->pkt_type;
2610
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002611 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002612 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302613 if (rem < 0)
2614 return rem;
2615
2616 data += (count - rem);
2617 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002618 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302619
2620 return rem;
2621}
2622EXPORT_SYMBOL(hci_recv_stream_fragment);
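/* Usage sketch (hypothetical): for a raw H:4 style byte stream the
 * leading packet-type octet is consumed here, so a serial driver can
 * forward whatever arrived on the line without parsing it first:
 *
 *	static void my_uart_rx(struct hci_dev *hdev, void *buf, int count)
 *	{
 *		if (hci_recv_stream_fragment(hdev, buf, count) < 0)
 *			BT_ERR("corrupted HCI stream");
 *	}
 */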
2623
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624/* ---- Interface to upper protocols ---- */
2625
Linus Torvalds1da177e2005-04-16 15:20:36 -07002626int hci_register_cb(struct hci_cb *cb)
2627{
2628 BT_DBG("%p name %s", cb, cb->name);
2629
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002630 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002631 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002632 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633
2634 return 0;
2635}
2636EXPORT_SYMBOL(hci_register_cb);
2637
2638int hci_unregister_cb(struct hci_cb *cb)
2639{
2640 BT_DBG("%p name %s", cb, cb->name);
2641
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002642 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002644 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002645
2646 return 0;
2647}
2648EXPORT_SYMBOL(hci_unregister_cb);
2649
2650static int hci_send_frame(struct sk_buff *skb)
2651{
2652 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2653
2654 if (!hdev) {
2655 kfree_skb(skb);
2656 return -ENODEV;
2657 }
2658
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002659 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002661 /* Time stamp */
2662 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002664 /* Send copy to monitor */
2665 hci_send_to_monitor(hdev, skb);
2666
2667 if (atomic_read(&hdev->promisc)) {
2668 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002669 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670 }
2671
2672 /* Get rid of skb owner, prior to sending to the driver. */
2673 skb_orphan(skb);
2674
2675 return hdev->send(skb);
2676}
2677
Johan Hedberg3119ae92013-03-05 20:37:44 +02002678void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2679{
2680 skb_queue_head_init(&req->cmd_q);
2681 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002682 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002683}
2684
2685int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2686{
2687 struct hci_dev *hdev = req->hdev;
2688 struct sk_buff *skb;
2689 unsigned long flags;
2690
2691 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2692
Andre Guedes5d73e032013-03-08 11:20:16 -03002693	/* If an error occurred during request building, remove all HCI
2694 * commands queued on the HCI request queue.
2695 */
2696 if (req->err) {
2697 skb_queue_purge(&req->cmd_q);
2698 return req->err;
2699 }
2700
Johan Hedberg3119ae92013-03-05 20:37:44 +02002701 /* Do not allow empty requests */
2702 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002703 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002704
2705 skb = skb_peek_tail(&req->cmd_q);
2706 bt_cb(skb)->req.complete = complete;
2707
2708 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2709 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2710 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2711
2712 queue_work(hdev->workqueue, &hdev->cmd_work);
2713
2714 return 0;
2715}
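/* Usage sketch, mirroring le_scan_disable_work() above: build a
 * request, queue one or more commands with hci_req_add() (defined
 * below), then run it with a completion callback; my_complete is a
 * hypothetical hci_req_complete_t:
 *
 *	struct hci_request req;
 *	struct hci_cp_le_set_scan_enable cp;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_ENABLE;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *
 *	err = hci_req_run(&req, my_complete);
 */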
2716
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

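/* Illustrative sketch: sending a stand-alone Write Scan Enable command;
 * the 'scan' variable is a placeholder for caller state.
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */
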
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

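/* Illustrative sketch: commands answered by a dedicated event rather than
 * Command Complete can pass that event code so the request logic can
 * match the reply, e.g. a remote name request (assumed opcode/event
 * pairing; 'req' and 'bdaddr' are caller-provided):
 *
 *	struct hci_cp_remote_name_req cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	bacpy(&cp.bdaddr, bdaddr);
 *	hci_req_add_ev(&req, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp,
 *		       HCI_EV_REMOTE_NAME);
 */
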
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

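/* Illustrative sketch: an event handler can recover the parameters of the
 * command being completed, e.g. for Write Scan Enable:
 *
 *	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
 *	if (sent)
 *		BT_DBG("scan enable param was 0x%2.2x", *sent);
 */
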
/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

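/* Illustrative sketch: an upper layer submits the first fragment flagged
 * ACL_START; hci_queue_acl() rewrites any frag_list members to ACL_CONT
 * so the remote side can reassemble the PDU:
 *
 *	hci_send_acl(chan, skb, ACL_START);
 */
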
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

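/* Worked example: with hdev->acl_cnt == 8 free ACL slots and num == 3
 * eligible connections, the least-used connection gets a quote of
 * 8 / 3 == 2 frames; a computed quote of 0 is rounded up to 1 so a
 * connection with queued data always makes progress.
 */
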
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

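/* Worked example: if channel A heads its queue with a priority 5 skb and
 * channel B with priority 7, only B competes this round (a higher
 * priority resets the candidate set); ties at the winning priority go to
 * the channel whose connection has the smallest 'sent' counter.
 */
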
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

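/* Worked example: with hdev->block_len == 64 and an skb carrying
 * HCI_ACL_HDR_SIZE + 150 bytes, the payload occupies
 * DIV_ROUND_UP(150, 64) == 3 controller buffer blocks.
 */
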
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

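/* Illustrative flow: for a three-command request A -> B -> C, a failure
 * status on B causes C to be dropped from cmd_q here and the request's
 * complete callback to fire once with B's status; on success the
 * callback fires only after C, the final command of the request.
 */
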
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}
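
/* Illustrative sketch: translate a management-interface address type to
 * the HCI-level LE address type before building an LE connection:
 *
 *	u8 addr_type = bdaddr_to_le(BDADDR_LE_RANDOM);
 */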