/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
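
/* Synchronous requests park the caller on hdev->req_wait_q while the
 * completion (or cancellation) handlers above flip req_status and wake
 * the queue. The pattern is the classic wait-queue handshake:
 *
 *	hdev->req_status = HCI_REQ_PEND;
 *	... queue and run the commands ...
 *	wait (interruptible, with timeout) until req_status != HCI_REQ_PEND
 *
 * Only one such request may be in flight per device; callers serialize
 * through hci_req_lock().
 */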
81
Fengguang Wu77a63e02013-04-20 16:24:31 +030082static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +030084{
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
87 struct sk_buff *skb;
88
89 hci_dev_lock(hdev);
90
91 skb = hdev->recv_evt;
92 hdev->recv_evt = NULL;
93
94 hci_dev_unlock(hdev);
95
96 if (!skb)
97 return ERR_PTR(-ENODATA);
98
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
101 goto failed;
102 }
103
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
106
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300107 if (event) {
108 if (hdr->evt != event)
109 goto failed;
110 return skb;
111 }
112
Johan Hedberg75e84b72013-04-02 13:35:04 +0300113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 goto failed;
116 }
117
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
120 goto failed;
121 }
122
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
125
126 if (opcode == __le16_to_cpu(ev->opcode))
127 return skb;
128
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
131
132failed:
133 kfree_skb(skb);
134 return ERR_PTR(-ENODATA);
135}
136
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300137struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300138 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300139{
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
142 int err = 0;
143
144 BT_DBG("%s", hdev->name);
145
146 hci_req_init(&req, hdev);
147
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300148 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300149
150 hdev->req_status = HCI_REQ_PEND;
151
152 err = hci_req_run(&req, hci_req_sync_complete);
153 if (err < 0)
154 return ERR_PTR(err);
155
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
158
159 schedule_timeout(timeout);
160
161 remove_wait_queue(&hdev->req_wait_q, &wait);
162
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
165
166 switch (hdev->req_status) {
167 case HCI_REQ_DONE:
168 err = -bt_to_errno(hdev->req_result);
169 break;
170
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
173 break;
174
175 default:
176 err = -ETIMEDOUT;
177 break;
178 }
179
180 hdev->req_status = hdev->req_result = 0;
181
182 BT_DBG("%s end: err %d", hdev->name, err);
183
184 if (err < 0)
185 return ERR_PTR(err);
186
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300187 return hci_get_cmd_complete(hdev, opcode, event);
188}
189EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
191struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300192 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300193{
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300195}
196EXPORT_SYMBOL(__hci_cmd_sync);
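
/* Usage sketch (hypothetical caller, e.g. a vendor driver's setup hook;
 * not part of this file): send one command, block until its Command
 * Complete event arrives, and consume the returned parameters.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	... parse skb->data as struct hci_rp_read_local_version ...
 *	kfree_skb(skb);
 */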
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200199static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200200 void (*func)(struct hci_request *req,
201 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200202 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200204 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205 DECLARE_WAITQUEUE(wait, current);
206 int err = 0;
207
208 BT_DBG("%s start", hdev->name);
209
Johan Hedberg42c6b122013-03-05 20:37:49 +0200210 hci_req_init(&req, hdev);
211
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 hdev->req_status = HCI_REQ_PEND;
213
Johan Hedberg42c6b122013-03-05 20:37:49 +0200214 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200215
Johan Hedberg42c6b122013-03-05 20:37:49 +0200216 err = hci_req_run(&req, hci_req_sync_complete);
217 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200218 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300219
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200224 */
Andre Guedes920c8302013-03-08 11:20:15 -0300225 if (err == -ENODATA)
226 return 0;
227
228 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200229 }
230
Andre Guedesbc4445c2013-03-08 11:20:13 -0300231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
233
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234 schedule_timeout(timeout);
235
236 remove_wait_queue(&hdev->req_wait_q, &wait);
237
238 if (signal_pending(current))
239 return -EINTR;
240
241 switch (hdev->req_status) {
242 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700243 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 break;
249
250 default:
251 err = -ETIMEDOUT;
252 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700253 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254
Johan Hedberga5040ef2011-01-10 13:28:59 +0200255 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256
257 BT_DBG("%s end: err %d", hdev->name, err);
258
259 return err;
260}
261
Johan Hedberg01178cd2013-03-05 20:37:41 +0200262static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200263 void (*req)(struct hci_request *req,
264 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200265 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266{
267 int ret;
268
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
271
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 /* Serialize all requests */
273 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200274 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 hci_req_unlock(hdev);
276
277 return ret;
278}
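
/* Usage sketch (hypothetical, mirroring the ioctl handlers further
 * below): the caller supplies a builder callback that queues one or
 * more commands on the request; hci_req_sync() runs them and blocks
 * until the last one completes or the timeout expires.
 *
 *	static void my_req(struct hci_request *req, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_req_sync(hdev, my_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */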

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
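
/* The return values map to the Write Inquiry Mode command parameter:
 * 0x00 standard inquiry results, 0x01 results with RSSI, 0x02 extended
 * inquiry results. The manufacturer/revision checks above whitelist
 * controllers known to support RSSI results without advertising the
 * corresponding LMP feature bit.
 */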

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
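
/* The event mask is a 64-bit little-endian bitfield where bit N enables
 * the HCI event with code N + 1. For example, events[4] |= 0x04 sets
 * bit 34 and thus enables event 0x23 (Read Remote Extended Features
 * Complete), and the LE mask value 0x1f enables the first five LE
 * meta-event subevents.
 */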

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and send the command only if it is
	 * marked as supported. If not supported, assume that the controller
	 * does not have actual support for stored link keys, which makes
	 * this command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
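
/* Init is staged because each stage depends on responses from the
 * previous one: stage 1 discovers the controller type, features and
 * version, stage 2 uses those features to pick the event mask and
 * inquiry mode, and stage 3 consults the supported-commands bitmap
 * and extended feature pages read during stage 2.
 */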

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
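
/* A discovery cycle walks STOPPED -> STARTING -> FINDING (scan results
 * coming in) -> RESOLVING (remote name lookups for unknown entries) ->
 * STOPPING -> STOPPED. Userspace sees the cycle collapsed to a single
 * "discovering" flag: mgmt is told 1 when we enter FINDING and 0 when
 * we return to STOPPED, unless the start attempt failed while still
 * in STARTING.
 */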

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
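
/* The resolve list is kept sorted by ascending |RSSI| (RSSI is negative,
 * so a smaller magnitude means a stronger signal), with entries whose
 * name request is already pending staying at the front. Name resolution
 * therefore proceeds from the closest devices outward.
 */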

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
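
/* ir.length is in units of 1.28 s per the Inquiry command definition,
 * but the wait above budgets 2000 ms per unit, leaving headroom for the
 * controller to deliver the final results and the Inquiry Complete
 * event before the request times out.
 */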

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
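
/* Advertising data is a sequence of length/type/value structures, where
 * the length byte counts the type byte plus the value. As a worked
 * example, an LE-only device in general discoverable mode named "dev"
 * (and with no valid TX power) would produce:
 *
 *	02 01 06        Flags: LE General Discoverable | BR/EDR Not Supported
 *	04 09 64 65 76  Complete Local Name: "dev"
 *
 * The whole payload is capped at HCI_MAX_AD_LENGTH (31 bytes).
 */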
1108
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001109void hci_update_ad(struct hci_request *req)
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001110{
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001111 struct hci_dev *hdev = req->hdev;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001112 struct hci_cp_le_set_adv_data cp;
1113 u8 len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001114
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001115 if (!lmp_le_capable(hdev))
1116 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001117
1118 memset(&cp, 0, sizeof(cp));
1119
1120 len = create_ad(hdev, cp.data);
1121
1122 if (hdev->adv_data_len == len &&
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001123 memcmp(cp.data, hdev->adv_data, len) == 0)
1124 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001125
1126 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1127 hdev->adv_data_len = len;
1128
1129 cp.length = len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001130
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001131 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001132}
1133
Linus Torvalds1da177e2005-04-16 15:20:36 -07001134/* ---- HCI ioctl helpers ---- */
1135
1136int hci_dev_open(__u16 dev)
1137{
1138 struct hci_dev *hdev;
1139 int ret = 0;
1140
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02001141 hdev = hci_dev_get(dev);
1142 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001143 return -ENODEV;
1144
1145 BT_DBG("%s %p", hdev->name, hdev);
1146
1147 hci_req_lock(hdev);
1148
Johan Hovold94324962012-03-15 14:48:41 +01001149 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
1150 ret = -ENODEV;
1151 goto done;
1152 }
1153
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001154 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
1155 ret = -ERFKILL;
1156 goto done;
1157 }
1158
Linus Torvalds1da177e2005-04-16 15:20:36 -07001159 if (test_bit(HCI_UP, &hdev->flags)) {
1160 ret = -EALREADY;
1161 goto done;
1162 }
1163
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 if (hdev->open(hdev)) {
1165 ret = -EIO;
1166 goto done;
1167 }
1168
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001169 atomic_set(&hdev->cmd_cnt, 1);
1170 set_bit(HCI_INIT, &hdev->flags);
1171
1172 if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
1173 ret = hdev->setup(hdev);
1174
1175 if (!ret) {
1176 /* Treat all non BR/EDR controllers as raw devices if
1177 * enable_hs is not set.
1178 */
1179 if (hdev->dev_type != HCI_BREDR && !enable_hs)
1180 set_bit(HCI_RAW, &hdev->flags);
1181
1182 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
1183 set_bit(HCI_RAW, &hdev->flags);
1184
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001185 if (!test_bit(HCI_RAW, &hdev->flags) &&
1186 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001187 ret = __hci_init(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001188 }
1189
Marcel Holtmannf41c70c2012-11-12 14:02:14 +09001190 clear_bit(HCI_INIT, &hdev->flags);
1191
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192 if (!ret) {
1193 hci_dev_hold(hdev);
1194 set_bit(HCI_UP, &hdev->flags);
1195 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001196 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001197 !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001198 mgmt_valid_hdev(hdev)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001199 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001200 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001201 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001202 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001203 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001204 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001205 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001206 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001207 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001208
1209 skb_queue_purge(&hdev->cmd_q);
1210 skb_queue_purge(&hdev->rx_q);
1211
1212 if (hdev->flush)
1213 hdev->flush(hdev);
1214
1215 if (hdev->sent_cmd) {
1216 kfree_skb(hdev->sent_cmd);
1217 hdev->sent_cmd = NULL;
1218 }
1219
1220 hdev->close(hdev);
1221 hdev->flags = 0;
1222 }
1223
1224done:
1225 hci_req_unlock(hdev);
1226 hci_dev_put(hdev);
1227 return ret;
1228}
1229
1230static int hci_dev_do_close(struct hci_dev *hdev)
1231{
1232 BT_DBG("%s %p", hdev->name, hdev);
1233
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001234 cancel_delayed_work(&hdev->power_off);
1235
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 hci_req_cancel(hdev, ENODEV);
1237 hci_req_lock(hdev);
1238
1239 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001240 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 hci_req_unlock(hdev);
1242 return 0;
1243 }
1244
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001245 /* Flush RX and TX works */
1246 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001247 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001249 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001250 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001251 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001252 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001253 }
1254
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001255 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001256 cancel_delayed_work(&hdev->service_cache);
1257
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001258 cancel_delayed_work_sync(&hdev->le_scan_disable);
1259
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001260 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001261 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001262 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001263 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264
1265 hci_notify(hdev, HCI_DEV_DOWN);
1266
1267 if (hdev->flush)
1268 hdev->flush(hdev);
1269
1270 /* Reset device */
1271 skb_queue_purge(&hdev->cmd_q);
1272 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001273 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001274 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001275 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001276 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001277 clear_bit(HCI_INIT, &hdev->flags);
1278 }
1279
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001280 /* flush cmd work */
1281 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001282
1283 /* Drop queues */
1284 skb_queue_purge(&hdev->rx_q);
1285 skb_queue_purge(&hdev->cmd_q);
1286 skb_queue_purge(&hdev->raw_q);
1287
1288 /* Drop last sent command */
1289 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001290 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 kfree_skb(hdev->sent_cmd);
1292 hdev->sent_cmd = NULL;
1293 }
1294
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001295 kfree_skb(hdev->recv_evt);
1296 hdev->recv_evt = NULL;
1297
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 /* After this point our queues are empty
1299 * and no tasks are scheduled. */
1300 hdev->close(hdev);
1301
Johan Hedberg35b973c2013-03-15 17:06:59 -05001302 /* Clear flags */
1303 hdev->flags = 0;
1304 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1305
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001306 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1307 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001308 hci_dev_lock(hdev);
1309 mgmt_powered(hdev, 0);
1310 hci_dev_unlock(hdev);
1311 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001312
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001313 /* Controller radio is available but is currently powered down */
1314 hdev->amp_status = 0;
1315
Johan Hedberge59fda82012-02-22 18:11:53 +02001316 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001317 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001318
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 hci_req_unlock(hdev);
1320
1321 hci_dev_put(hdev);
1322 return 0;
1323}
1324
1325int hci_dev_close(__u16 dev)
1326{
1327 struct hci_dev *hdev;
1328 int err;
1329
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001330 hdev = hci_dev_get(dev);
1331 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001333
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001334 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1335 err = -EBUSY;
1336 goto done;
1337 }
1338
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001339 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1340 cancel_delayed_work(&hdev->power_off);
1341
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001343
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001344done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001345 hci_dev_put(hdev);
1346 return err;
1347}
1348
1349int hci_dev_reset(__u16 dev)
1350{
1351 struct hci_dev *hdev;
1352 int ret = 0;
1353
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001354 hdev = hci_dev_get(dev);
1355 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001356 return -ENODEV;
1357
1358 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001359
Marcel Holtmann808a0492013-08-26 20:57:58 -07001360 if (!test_bit(HCI_UP, &hdev->flags)) {
1361 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001362 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001363 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001365 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1366 ret = -EBUSY;
1367 goto done;
1368 }
1369
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 /* Drop queues */
1371 skb_queue_purge(&hdev->rx_q);
1372 skb_queue_purge(&hdev->cmd_q);
1373
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001374 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001375 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001376 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001377 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378
1379 if (hdev->flush)
1380 hdev->flush(hdev);
1381
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001382 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001383 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384
1385 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001386 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001387
1388done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 hci_req_unlock(hdev);
1390 hci_dev_put(hdev);
1391 return ret;
1392}
1393
1394int hci_dev_reset_stat(__u16 dev)
1395{
1396 struct hci_dev *hdev;
1397 int ret = 0;
1398
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001399 hdev = hci_dev_get(dev);
1400 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001401 return -ENODEV;
1402
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001403 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1404 ret = -EBUSY;
1405 goto done;
1406 }
1407
Linus Torvalds1da177e2005-04-16 15:20:36 -07001408 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1409
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001410done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 return ret;
1413}
1414
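/* Handle the HCISET* device control ioctls (authentication,
 * encryption, scan mode, link policy, packet types and MTUs).
 */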
1415int hci_dev_cmd(unsigned int cmd, void __user *arg)
1416{
1417 struct hci_dev *hdev;
1418 struct hci_dev_req dr;
1419 int err = 0;
1420
1421 if (copy_from_user(&dr, arg, sizeof(dr)))
1422 return -EFAULT;
1423
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001424 hdev = hci_dev_get(dr.dev_id);
1425 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001426 return -ENODEV;
1427
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001428 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1429 err = -EBUSY;
1430 goto done;
1431 }
1432
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 switch (cmd) {
1434 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001435 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1436 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 break;
1438
1439 case HCISETENCRYPT:
1440 if (!lmp_encrypt_capable(hdev)) {
1441 err = -EOPNOTSUPP;
1442 break;
1443 }
1444
1445 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1446 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001447 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1448 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 if (err)
1450 break;
1451 }
1452
Johan Hedberg01178cd2013-03-05 20:37:41 +02001453 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1454 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455 break;
1456
1457 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001458 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1459 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001460 break;
1461
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001462 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001463 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1464 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001465 break;
1466
1467 case HCISETLINKMODE:
1468 hdev->link_mode = ((__u16) dr.dev_opt) &
1469 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1470 break;
1471
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 case HCISETPTYPE:
1473 hdev->pkt_type = (__u16) dr.dev_opt;
1474 break;
1475
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001477 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1478 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001479 break;
1480
1481 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001482 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1483 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484 break;
1485
1486 default:
1487 err = -EINVAL;
1488 break;
1489 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001490
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001491done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001492 hci_dev_put(hdev);
1493 return err;
1494}
1495
1496int hci_get_dev_list(void __user *arg)
1497{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001498 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 struct hci_dev_list_req *dl;
1500 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001501 int n = 0, size, err;
1502 __u16 dev_num;
1503
1504 if (get_user(dev_num, (__u16 __user *) arg))
1505 return -EFAULT;
1506
1507 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1508 return -EINVAL;
1509
1510 size = sizeof(*dl) + dev_num * sizeof(*dr);
1511
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001512 dl = kzalloc(size, GFP_KERNEL);
1513 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 return -ENOMEM;
1515
1516 dr = dl->dev_req;
1517
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001518 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001519 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001520 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001521 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001522
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001523 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1524 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001525
Linus Torvalds1da177e2005-04-16 15:20:36 -07001526 (dr + n)->dev_id = hdev->id;
1527 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001528
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 if (++n >= dev_num)
1530 break;
1531 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001532 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001533
1534 dl->dev_num = n;
1535 size = sizeof(*dl) + n * sizeof(*dr);
1536
1537 err = copy_to_user(arg, dl, size);
1538 kfree(dl);
1539
1540 return err ? -EFAULT : 0;
1541}
1542
1543int hci_get_dev_info(void __user *arg)
1544{
1545 struct hci_dev *hdev;
1546 struct hci_dev_info di;
1547 int err = 0;
1548
1549 if (copy_from_user(&di, arg, sizeof(di)))
1550 return -EFAULT;
1551
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001552 hdev = hci_dev_get(di.dev_id);
1553 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001554 return -ENODEV;
1555
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001556 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001557 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001558
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001559 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1560 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001561
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 strcpy(di.name, hdev->name);
1563 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001564 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001565 di.flags = hdev->flags;
1566 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001567 if (lmp_bredr_capable(hdev)) {
1568 di.acl_mtu = hdev->acl_mtu;
1569 di.acl_pkts = hdev->acl_pkts;
1570 di.sco_mtu = hdev->sco_mtu;
1571 di.sco_pkts = hdev->sco_pkts;
1572 } else {
1573 di.acl_mtu = hdev->le_mtu;
1574 di.acl_pkts = hdev->le_pkts;
1575 di.sco_mtu = 0;
1576 di.sco_pkts = 0;
1577 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 di.link_policy = hdev->link_policy;
1579 di.link_mode = hdev->link_mode;
1580
1581 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1582 memcpy(&di.features, &hdev->features, sizeof(di.features));
1583
1584 if (copy_to_user(arg, &di, sizeof(di)))
1585 err = -EFAULT;
1586
1587 hci_dev_put(hdev);
1588
1589 return err;
1590}
1591
1592/* ---- Interface to HCI drivers ---- */
1593
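/* rfkill callback: when the switch blocks the radio, take the
 * device down. Devices bound to a user channel report -EBUSY.
 */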
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001594static int hci_rfkill_set_block(void *data, bool blocked)
1595{
1596 struct hci_dev *hdev = data;
1597
1598 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1599
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001600 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1601 return -EBUSY;
1602
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001603 if (!blocked)
1604 return 0;
1605
1606 hci_dev_do_close(hdev);
1607
1608 return 0;
1609}
1610
1611static const struct rfkill_ops hci_rfkill_ops = {
1612 .set_block = hci_rfkill_set_block,
1613};
1614
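/* Deferred power-on work: bring the device up, arm the auto
 * power-off timer if requested and notify mgmt once setup has
 * completed.
 */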
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001615static void hci_power_on(struct work_struct *work)
1616{
1617 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001618 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001619
1620 BT_DBG("%s", hdev->name);
1621
Johan Hedberg96570ff2013-05-29 09:51:29 +03001622 err = hci_dev_open(hdev->id);
1623 if (err < 0) {
1624 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001625 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001626 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001627
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001628 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg19202572013-01-14 22:33:51 +02001629 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1630 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001631
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001632 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001633 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001634}
1635
1636static void hci_power_off(struct work_struct *work)
1637{
Johan Hedberg32435532011-11-07 22:16:04 +02001638 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001639 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001640
1641 BT_DBG("%s", hdev->name);
1642
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001643 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001644}
1645
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001646static void hci_discov_off(struct work_struct *work)
1647{
1648 struct hci_dev *hdev;
1649 u8 scan = SCAN_PAGE;
1650
1651 hdev = container_of(work, struct hci_dev, discov_off.work);
1652
1653 BT_DBG("%s", hdev->name);
1654
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001655 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001656
1657 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1658
1659 hdev->discov_timeout = 0;
1660
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001661 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001662}
1663
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001664int hci_uuids_clear(struct hci_dev *hdev)
1665{
Johan Hedberg48210022013-01-27 00:31:28 +02001666 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001667
Johan Hedberg48210022013-01-27 00:31:28 +02001668 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1669 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001670 kfree(uuid);
1671 }
1672
1673 return 0;
1674}
1675
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001676int hci_link_keys_clear(struct hci_dev *hdev)
1677{
1678 struct list_head *p, *n;
1679
1680 list_for_each_safe(p, n, &hdev->link_keys) {
1681 struct link_key *key;
1682
1683 key = list_entry(p, struct link_key, list);
1684
1685 list_del(p);
1686 kfree(key);
1687 }
1688
1689 return 0;
1690}
1691
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001692int hci_smp_ltks_clear(struct hci_dev *hdev)
1693{
1694 struct smp_ltk *k, *tmp;
1695
1696 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1697 list_del(&k->list);
1698 kfree(k);
1699 }
1700
1701 return 0;
1702}
1703
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001704struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1705{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001706 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001707
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001708 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001709 if (bacmp(bdaddr, &k->bdaddr) == 0)
1710 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001711
1712 return NULL;
1713}
1714
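/* Decide whether a new link key should be stored persistently,
 * based on the key type and on the bonding requirements of both
 * sides of the connection.
 */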
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301715static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001716 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001717{
1718 /* Legacy key */
1719 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301720 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001721
1722 /* Debug keys are insecure so don't store them persistently */
1723 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301724 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001725
1726 /* Changed combination key and there's no previous one */
1727 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301728 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001729
1730 /* Security mode 3 case */
1731 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301732 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001733
1734 /* Neither local nor remote side had no-bonding as requirement */
1735 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301736 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001737
1738 /* Local side had dedicated bonding as requirement */
1739 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301740 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001741
1742 /* Remote side had dedicated bonding as requirement */
1743 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301744 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001745
1746 /* If none of the above criteria match, then don't store the key
1747 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301748 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001749}
1750
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001751struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001752{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001753 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001754
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001755 list_for_each_entry(k, &hdev->long_term_keys, list) {
1756 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001757 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001758 continue;
1759
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001760 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001761 }
1762
1763 return NULL;
1764}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001765
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001766struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001767 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001768{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001769 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001770
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001771 list_for_each_entry(k, &hdev->long_term_keys, list)
1772 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001773 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001774 return k;
1775
1776 return NULL;
1777}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001778
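/* Store (or update) a BR/EDR link key. Changed combination keys
 * keep the previous key type, and mgmt is notified whenever a
 * new key is added.
 */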
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001779int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001780 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001781{
1782 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301783 u8 old_key_type;
1784 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001785
1786 old_key = hci_find_link_key(hdev, bdaddr);
1787 if (old_key) {
1788 old_key_type = old_key->type;
1789 key = old_key;
1790 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001791 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001792 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1793 if (!key)
1794 return -ENOMEM;
1795 list_add(&key->list, &hdev->link_keys);
1796 }
1797
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001798 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001799
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001800 /* Some buggy controller combinations generate a changed
1801 * combination key for legacy pairing even when there's no
1802 * previous key */
1803 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001804 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001805 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001806 if (conn)
1807 conn->key_type = type;
1808 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001809
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001810 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001811 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001812 key->pin_len = pin_len;
1813
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001814 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001815 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001816 else
1817 key->type = type;
1818
Johan Hedberg4df378a2011-04-28 11:29:03 -07001819 if (!new_key)
1820 return 0;
1821
1822 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1823
Johan Hedberg744cf192011-11-08 20:40:14 +02001824 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001825
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301826 if (conn)
1827 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001828
1829 return 0;
1830}
1831
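/* Store (or update) an SMP long term key. Only STKs and LTKs
 * are accepted, and mgmt is notified only for new LTKs.
 */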
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001832int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001833 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001834 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001835{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001836 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001837
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001838 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1839 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001840
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001841 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1842 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001843 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001844 else {
1845 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001846 if (!key)
1847 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001848 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001849 }
1850
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001851 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001852 key->bdaddr_type = addr_type;
1853 memcpy(key->val, tk, sizeof(key->val));
1854 key->authenticated = authenticated;
1855 key->ediv = ediv;
1856 key->enc_size = enc_size;
1857 key->type = type;
1858 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001859
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001860 if (!new_key)
1861 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001862
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001863 if (type & HCI_SMP_LTK)
1864 mgmt_new_ltk(hdev, key, 1);
1865
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001866 return 0;
1867}
1868
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001869int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1870{
1871 struct link_key *key;
1872
1873 key = hci_find_link_key(hdev, bdaddr);
1874 if (!key)
1875 return -ENOENT;
1876
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001877 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001878
1879 list_del(&key->list);
1880 kfree(key);
1881
1882 return 0;
1883}
1884
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001885int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1886{
1887 struct smp_ltk *k, *tmp;
1888
1889 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1890 if (bacmp(bdaddr, &k->bdaddr))
1891 continue;
1892
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001893 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001894
1895 list_del(&k->list);
1896 kfree(k);
1897 }
1898
1899 return 0;
1900}
1901
Ville Tervo6bd32322011-02-16 16:32:41 +02001902/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001903static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001904{
1905 struct hci_dev *hdev = (void *) arg;
1906
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001907 if (hdev->sent_cmd) {
1908 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1909 u16 opcode = __le16_to_cpu(sent->opcode);
1910
1911 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1912 } else {
1913 BT_ERR("%s command tx timeout", hdev->name);
1914 }
1915
Ville Tervo6bd32322011-02-16 16:32:41 +02001916 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001917 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001918}
1919
Szymon Janc2763eda2011-03-22 13:12:22 +01001920struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001921 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001922{
1923 struct oob_data *data;
1924
1925 list_for_each_entry(data, &hdev->remote_oob_data, list)
1926 if (bacmp(bdaddr, &data->bdaddr) == 0)
1927 return data;
1928
1929 return NULL;
1930}
1931
1932int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1933{
1934 struct oob_data *data;
1935
1936 data = hci_find_remote_oob_data(hdev, bdaddr);
1937 if (!data)
1938 return -ENOENT;
1939
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001940 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001941
1942 list_del(&data->list);
1943 kfree(data);
1944
1945 return 0;
1946}
1947
1948int hci_remote_oob_data_clear(struct hci_dev *hdev)
1949{
1950 struct oob_data *data, *n;
1951
1952 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1953 list_del(&data->list);
1954 kfree(data);
1955 }
1956
1957 return 0;
1958}
1959
1960int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001961 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001962{
1963 struct oob_data *data;
1964
1965 data = hci_find_remote_oob_data(hdev, bdaddr);
1966
1967 if (!data) {
1968 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1969 if (!data)
1970 return -ENOMEM;
1971
1972 bacpy(&data->bdaddr, bdaddr);
1973 list_add(&data->list, &hdev->remote_oob_data);
1974 }
1975
1976 memcpy(data->hash, hash, sizeof(data->hash));
1977 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1978
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001979 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001980
1981 return 0;
1982}
1983
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001984struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001985{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001986 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001987
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001988 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001989 if (bacmp(bdaddr, &b->bdaddr) == 0)
1990 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001991
1992 return NULL;
1993}
1994
1995int hci_blacklist_clear(struct hci_dev *hdev)
1996{
1997 struct list_head *p, *n;
1998
1999 list_for_each_safe(p, n, &hdev->blacklist) {
2000 struct bdaddr_list *b;
2001
2002 b = list_entry(p, struct bdaddr_list, list);
2003
2004 list_del(p);
2005 kfree(b);
2006 }
2007
2008 return 0;
2009}
2010
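/* Add a device to the blacklist and notify mgmt. BDADDR_ANY is
 * rejected and duplicate entries return -EEXIST.
 */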
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002011int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002012{
2013 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002014
2015 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2016 return -EBADF;
2017
Antti Julku5e762442011-08-25 16:48:02 +03002018 if (hci_blacklist_lookup(hdev, bdaddr))
2019 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002020
2021 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002022 if (!entry)
2023 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002024
2025 bacpy(&entry->bdaddr, bdaddr);
2026
2027 list_add(&entry->list, &hdev->blacklist);
2028
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002029 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002030}
2031
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002032int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002033{
2034 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002035
Szymon Janc1ec918c2011-11-16 09:32:21 +01002036 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002037 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002038
2039 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002040 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002041 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002042
2043 list_del(&entry->list);
2044 kfree(entry);
2045
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002046 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002047}
2048
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002049static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002050{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002051 if (status) {
2052 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002053
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002054 hci_dev_lock(hdev);
2055 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2056 hci_dev_unlock(hdev);
2057 return;
2058 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002059}
2060
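/* Completion handler for the LE scan disable request. LE-only
 * discovery is simply stopped; interleaved discovery continues
 * with a classic inquiry.
 */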
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002061static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002062{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002063 /* General inquiry access code (GIAC) */
2064 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2065 struct hci_request req;
2066 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002067 int err;
2068
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002069 if (status) {
2070 BT_ERR("Failed to disable LE scanning: status %d", status);
2071 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002072 }
2073
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002074 switch (hdev->discovery.type) {
2075 case DISCOV_TYPE_LE:
2076 hci_dev_lock(hdev);
2077 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2078 hci_dev_unlock(hdev);
2079 break;
2080
2081 case DISCOV_TYPE_INTERLEAVED:
2082 hci_req_init(&req, hdev);
2083
2084 memset(&cp, 0, sizeof(cp));
2085 memcpy(&cp.lap, lap, sizeof(cp.lap));
2086 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2087 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2088
2089 hci_dev_lock(hdev);
2090
2091 hci_inquiry_cache_flush(hdev);
2092
2093 err = hci_req_run(&req, inquiry_complete);
2094 if (err) {
2095 BT_ERR("Inquiry request failed: err %d", err);
2096 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2097 }
2098
2099 hci_dev_unlock(hdev);
2100 break;
2101 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002102}
2103
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002104static void le_scan_disable_work(struct work_struct *work)
2105{
2106 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002107 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002108 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002109 struct hci_request req;
2110 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002111
2112 BT_DBG("%s", hdev->name);
2113
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002114 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002115
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002116 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002117 cp.enable = LE_SCAN_DISABLE;
2118 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002119
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002120 err = hci_req_run(&req, le_scan_disable_work_complete);
2121 if (err)
2122 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002123}
2124
David Herrmann9be0dab2012-04-22 14:39:57 +02002125/* Alloc HCI device */
2126struct hci_dev *hci_alloc_dev(void)
2127{
2128 struct hci_dev *hdev;
2129
2130 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2131 if (!hdev)
2132 return NULL;
2133
David Herrmannb1b813d2012-04-22 14:39:58 +02002134 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2135 hdev->esco_type = (ESCO_HV1);
2136 hdev->link_mode = (HCI_LM_ACCEPT);
2137 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002138 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2139 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002140
David Herrmannb1b813d2012-04-22 14:39:58 +02002141 hdev->sniff_max_interval = 800;
2142 hdev->sniff_min_interval = 80;
2143
2144 mutex_init(&hdev->lock);
2145 mutex_init(&hdev->req_lock);
2146
2147 INIT_LIST_HEAD(&hdev->mgmt_pending);
2148 INIT_LIST_HEAD(&hdev->blacklist);
2149 INIT_LIST_HEAD(&hdev->uuids);
2150 INIT_LIST_HEAD(&hdev->link_keys);
2151 INIT_LIST_HEAD(&hdev->long_term_keys);
2152 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002153 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002154
2155 INIT_WORK(&hdev->rx_work, hci_rx_work);
2156 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2157 INIT_WORK(&hdev->tx_work, hci_tx_work);
2158 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002159
David Herrmannb1b813d2012-04-22 14:39:58 +02002160 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2161 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2162 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2163
David Herrmannb1b813d2012-04-22 14:39:58 +02002164 skb_queue_head_init(&hdev->rx_q);
2165 skb_queue_head_init(&hdev->cmd_q);
2166 skb_queue_head_init(&hdev->raw_q);
2167
2168 init_waitqueue_head(&hdev->req_wait_q);
2169
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002170 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002171
David Herrmannb1b813d2012-04-22 14:39:58 +02002172 hci_init_sysfs(hdev);
2173 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002174
2175 return hdev;
2176}
2177EXPORT_SYMBOL(hci_alloc_dev);
2178
2179/* Free HCI device */
2180void hci_free_dev(struct hci_dev *hdev)
2181{
David Herrmann9be0dab2012-04-22 14:39:57 +02002182 /* will free via device release */
2183 put_device(&hdev->dev);
2184}
2185EXPORT_SYMBOL(hci_free_dev);
2186
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187/* Register HCI device */
2188int hci_register_dev(struct hci_dev *hdev)
2189{
David Herrmannb1b813d2012-04-22 14:39:58 +02002190 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191
David Herrmann010666a2012-01-07 15:47:07 +01002192 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 return -EINVAL;
2194
Mat Martineau08add512011-11-02 16:18:36 -07002195 /* Do not allow HCI_AMP devices to register at index 0,
2196 * so the index can be used as the AMP controller ID.
2197 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002198 switch (hdev->dev_type) {
2199 case HCI_BREDR:
2200 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2201 break;
2202 case HCI_AMP:
2203 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2204 break;
2205 default:
2206 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002207 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002208
Sasha Levin3df92b32012-05-27 22:36:56 +02002209 if (id < 0)
2210 return id;
2211
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212 sprintf(hdev->name, "hci%d", id);
2213 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002214
2215 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2216
Kees Cookd8537542013-07-03 15:04:57 -07002217 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2218 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002219 if (!hdev->workqueue) {
2220 error = -ENOMEM;
2221 goto err;
2222 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002223
Kees Cookd8537542013-07-03 15:04:57 -07002224 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2225 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002226 if (!hdev->req_workqueue) {
2227 destroy_workqueue(hdev->workqueue);
2228 error = -ENOMEM;
2229 goto err;
2230 }
2231
David Herrmann33ca9542011-10-08 14:58:49 +02002232 error = hci_add_sysfs(hdev);
2233 if (error < 0)
2234 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002236 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002237 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2238 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002239 if (hdev->rfkill) {
2240 if (rfkill_register(hdev->rfkill) < 0) {
2241 rfkill_destroy(hdev->rfkill);
2242 hdev->rfkill = NULL;
2243 }
2244 }
2245
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002246 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002247
2248 if (hdev->dev_type != HCI_AMP)
2249 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2250
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002251 write_lock(&hci_dev_list_lock);
2252 list_add(&hdev->list, &hci_dev_list);
2253 write_unlock(&hci_dev_list_lock);
2254
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002256 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002257
Johan Hedberg19202572013-01-14 22:33:51 +02002258 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002259
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002261
David Herrmann33ca9542011-10-08 14:58:49 +02002262err_wqueue:
2263 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002264 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002265err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002266 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002267
David Herrmann33ca9542011-10-08 14:58:49 +02002268 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002269}
2270EXPORT_SYMBOL(hci_register_dev);
2271
2272/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002273void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274{
Sasha Levin3df92b32012-05-27 22:36:56 +02002275 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002276
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002277 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278
Johan Hovold94324962012-03-15 14:48:41 +01002279 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2280
Sasha Levin3df92b32012-05-27 22:36:56 +02002281 id = hdev->id;
2282
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002283 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002285 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286
2287 hci_dev_do_close(hdev);
2288
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302289 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002290 kfree_skb(hdev->reassembly[i]);
2291
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002292 cancel_work_sync(&hdev->power_on);
2293
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002294 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002295 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002296 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002297 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002298 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002299 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002300
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002301 /* mgmt_index_removed should take care of emptying the
2302 * pending list */
2303 BUG_ON(!list_empty(&hdev->mgmt_pending));
2304
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 hci_notify(hdev, HCI_DEV_UNREG);
2306
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002307 if (hdev->rfkill) {
2308 rfkill_unregister(hdev->rfkill);
2309 rfkill_destroy(hdev->rfkill);
2310 }
2311
David Herrmannce242972011-10-08 14:58:48 +02002312 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002313
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002314 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002315 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002316
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002317 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002318 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002319 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002320 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002321 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002322 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002323 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002324
David Herrmanndc946bd2012-01-07 15:47:24 +01002325 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002326
2327 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328}
2329EXPORT_SYMBOL(hci_unregister_dev);
2330
2331/* Suspend HCI device */
2332int hci_suspend_dev(struct hci_dev *hdev)
2333{
2334 hci_notify(hdev, HCI_DEV_SUSPEND);
2335 return 0;
2336}
2337EXPORT_SYMBOL(hci_suspend_dev);
2338
2339/* Resume HCI device */
2340int hci_resume_dev(struct hci_dev *hdev)
2341{
2342 hci_notify(hdev, HCI_DEV_RESUME);
2343 return 0;
2344}
2345EXPORT_SYMBOL(hci_resume_dev);
2346
Marcel Holtmann76bca882009-11-18 00:40:39 +01002347/* Receive frame from HCI drivers */
2348int hci_recv_frame(struct sk_buff *skb)
2349{
2350 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2351 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002352 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002353 kfree_skb(skb);
2354 return -ENXIO;
2355 }
2356
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002357 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002358 bt_cb(skb)->incoming = 1;
2359
2360 /* Time stamp */
2361 __net_timestamp(skb);
2362
Marcel Holtmann76bca882009-11-18 00:40:39 +01002363 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002364 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002365
Marcel Holtmann76bca882009-11-18 00:40:39 +01002366 return 0;
2367}
2368EXPORT_SYMBOL(hci_recv_frame);
2369
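/* Reassemble a possibly fragmented HCI packet of the given type
 * in hdev->reassembly[index]. Complete frames are handed to
 * hci_recv_frame(). Returns the number of unprocessed bytes, or
 * a negative error code.
 */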
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302370static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002371 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302372{
2373 int len = 0;
2374 int hlen = 0;
2375 int remain = count;
2376 struct sk_buff *skb;
2377 struct bt_skb_cb *scb;
2378
2379 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002380 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302381 return -EILSEQ;
2382
2383 skb = hdev->reassembly[index];
2384
2385 if (!skb) {
2386 switch (type) {
2387 case HCI_ACLDATA_PKT:
2388 len = HCI_MAX_FRAME_SIZE;
2389 hlen = HCI_ACL_HDR_SIZE;
2390 break;
2391 case HCI_EVENT_PKT:
2392 len = HCI_MAX_EVENT_SIZE;
2393 hlen = HCI_EVENT_HDR_SIZE;
2394 break;
2395 case HCI_SCODATA_PKT:
2396 len = HCI_MAX_SCO_SIZE;
2397 hlen = HCI_SCO_HDR_SIZE;
2398 break;
2399 }
2400
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002401 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302402 if (!skb)
2403 return -ENOMEM;
2404
2405 scb = (void *) skb->cb;
2406 scb->expect = hlen;
2407 scb->pkt_type = type;
2408
2409 skb->dev = (void *) hdev;
2410 hdev->reassembly[index] = skb;
2411 }
2412
2413 while (count) {
2414 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002415 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302416
2417 memcpy(skb_put(skb, len), data, len);
2418
2419 count -= len;
2420 data += len;
2421 scb->expect -= len;
2422 remain = count;
2423
2424 switch (type) {
2425 case HCI_EVENT_PKT:
2426 if (skb->len == HCI_EVENT_HDR_SIZE) {
2427 struct hci_event_hdr *h = hci_event_hdr(skb);
2428 scb->expect = h->plen;
2429
2430 if (skb_tailroom(skb) < scb->expect) {
2431 kfree_skb(skb);
2432 hdev->reassembly[index] = NULL;
2433 return -ENOMEM;
2434 }
2435 }
2436 break;
2437
2438 case HCI_ACLDATA_PKT:
2439 if (skb->len == HCI_ACL_HDR_SIZE) {
2440 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2441 scb->expect = __le16_to_cpu(h->dlen);
2442
2443 if (skb_tailroom(skb) < scb->expect) {
2444 kfree_skb(skb);
2445 hdev->reassembly[index] = NULL;
2446 return -ENOMEM;
2447 }
2448 }
2449 break;
2450
2451 case HCI_SCODATA_PKT:
2452 if (skb->len == HCI_SCO_HDR_SIZE) {
2453 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2454 scb->expect = h->dlen;
2455
2456 if (skb_tailroom(skb) < scb->expect) {
2457 kfree_skb(skb);
2458 hdev->reassembly[index] = NULL;
2459 return -ENOMEM;
2460 }
2461 }
2462 break;
2463 }
2464
2465 if (scb->expect == 0) {
2466 /* Complete frame */
2467
2468 bt_cb(skb)->pkt_type = type;
2469 hci_recv_frame(skb);
2470
2471 hdev->reassembly[index] = NULL;
2472 return remain;
2473 }
2474 }
2475
2476 return remain;
2477}
2478
Marcel Holtmannef222012007-07-11 06:42:04 +02002479int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2480{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302481 int rem = 0;
2482
Marcel Holtmannef222012007-07-11 06:42:04 +02002483 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2484 return -EILSEQ;
2485
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002486 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002487 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302488 if (rem < 0)
2489 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002490
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302491 data += (count - rem);
2492 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002493 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002494
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302495 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002496}
2497EXPORT_SYMBOL(hci_recv_fragment);
2498
Suraj Sumangala99811512010-07-14 13:02:19 +05302499#define STREAM_REASSEMBLY 0
2500
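/* Reassemble frames from a byte stream transport (e.g. UART)
 * where each frame is preceded by a one byte packet type
 * indicator.
 */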
2501int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2502{
2503 int type;
2504 int rem = 0;
2505
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002506 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302507 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2508
2509 if (!skb) {
2510 struct { char type; } *pkt;
2511
2512 /* Start of the frame */
2513 pkt = data;
2514 type = pkt->type;
2515
2516 data++;
2517 count--;
2518 } else
2519 type = bt_cb(skb)->pkt_type;
2520
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002521 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002522 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302523 if (rem < 0)
2524 return rem;
2525
2526 data += (count - rem);
2527 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002528 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302529
2530 return rem;
2531}
2532EXPORT_SYMBOL(hci_recv_stream_fragment);
2533
Linus Torvalds1da177e2005-04-16 15:20:36 -07002534/* ---- Interface to upper protocols ---- */
2535
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536int hci_register_cb(struct hci_cb *cb)
2537{
2538 BT_DBG("%p name %s", cb, cb->name);
2539
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002540 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002542 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002543
2544 return 0;
2545}
2546EXPORT_SYMBOL(hci_register_cb);
2547
2548int hci_unregister_cb(struct hci_cb *cb)
2549{
2550 BT_DBG("%p name %s", cb, cb->name);
2551
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002552 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002554 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555
2556 return 0;
2557}
2558EXPORT_SYMBOL(hci_unregister_cb);
2559
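/* Hand a frame to the driver, time stamping it and passing
 * copies to the monitor socket and, in promiscuous mode, to the
 * HCI sockets first.
 */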
2560static int hci_send_frame(struct sk_buff *skb)
2561{
2562 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2563
2564 if (!hdev) {
2565 kfree_skb(skb);
2566 return -ENODEV;
2567 }
2568
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002569 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002571 /* Time stamp */
2572 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002574 /* Send copy to monitor */
2575 hci_send_to_monitor(hdev, skb);
2576
2577 if (atomic_read(&hdev->promisc)) {
2578 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002579 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580 }
2581
2582 /* Get rid of skb owner, prior to sending to the driver. */
2583 skb_orphan(skb);
2584
2585 return hdev->send(skb);
2586}
2587
Johan Hedberg3119ae92013-03-05 20:37:44 +02002588void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2589{
2590 skb_queue_head_init(&req->cmd_q);
2591 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002592 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002593}
2594
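/* Submit a built request: attach the completion callback to the
 * last queued command, splice all commands onto the device
 * command queue and kick the command worker.
 */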
2595int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2596{
2597 struct hci_dev *hdev = req->hdev;
2598 struct sk_buff *skb;
2599 unsigned long flags;
2600
2601 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2602
Andre Guedes5d73e032013-03-08 11:20:16 -03002603	/* If an error occurred during request building, remove all HCI
2604 * commands queued on the HCI request queue.
2605 */
2606 if (req->err) {
2607 skb_queue_purge(&req->cmd_q);
2608 return req->err;
2609 }
2610
Johan Hedberg3119ae92013-03-05 20:37:44 +02002611 /* Do not allow empty requests */
2612 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002613 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002614
2615 skb = skb_peek_tail(&req->cmd_q);
2616 bt_cb(skb)->req.complete = complete;
2617
2618 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2619 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2620 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2621
2622 queue_work(hdev->workqueue, &hdev->cmd_work);
2623
2624 return 0;
2625}
2626
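/* Allocate and fill an HCI command packet (header plus
 * parameters), ready to be queued for transmission.
 */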
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002627static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002628 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629{
2630 int len = HCI_COMMAND_HDR_SIZE + plen;
2631 struct hci_command_hdr *hdr;
2632 struct sk_buff *skb;
2633
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002635 if (!skb)
2636 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002637
2638 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002639 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640 hdr->plen = plen;
2641
2642 if (plen)
2643 memcpy(skb_put(skb, plen), param, plen);
2644
2645 BT_DBG("skb len %d", skb->len);
2646
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002647 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002648 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002649
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002650 return skb;
2651}
2652
2653/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002654int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2655 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002656{
2657 struct sk_buff *skb;
2658
2659 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2660
2661 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2662 if (!skb) {
2663 BT_ERR("%s no memory for command", hdev->name);
2664 return -ENOMEM;
2665 }
2666
Johan Hedberg11714b32013-03-05 20:37:47 +02002667	/* Stand-alone HCI commands must be flagged as
2668 * single-command requests.
2669 */
2670 bt_cb(skb)->req.start = true;
2671
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002673 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674
2675 return 0;
2676}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677
Johan Hedberg71c76a12013-03-05 20:37:46 +02002678/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002679void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2680 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002681{
2682 struct hci_dev *hdev = req->hdev;
2683 struct sk_buff *skb;
2684
2685 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2686
Andre Guedes34739c12013-03-08 11:20:18 -03002687	/* If an error occurred during request building, there is no point in
2688 * queueing the HCI command. We can simply return.
2689 */
2690 if (req->err)
2691 return;
2692
Johan Hedberg71c76a12013-03-05 20:37:46 +02002693 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2694 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002695 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2696 hdev->name, opcode);
2697 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002698 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002699 }
2700
2701 if (skb_queue_empty(&req->cmd_q))
2702 bt_cb(skb)->req.start = true;
2703
Johan Hedberg02350a72013-04-03 21:50:29 +03002704 bt_cb(skb)->req.event = event;
2705
Johan Hedberg71c76a12013-03-05 20:37:46 +02002706 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002707}
2708
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002709void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2710 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002711{
2712 hci_req_add_ev(req, opcode, plen, param, 0);
2713}
2714
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002716void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717{
2718 struct hci_command_hdr *hdr;
2719
2720 if (!hdev->sent_cmd)
2721 return NULL;
2722
2723 hdr = (void *) hdev->sent_cmd->data;
2724
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002725 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 return NULL;
2727
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002728 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729
2730 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2731}
2732
2733/* Send ACL data */
2734static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2735{
2736 struct hci_acl_hdr *hdr;
2737 int len = skb->len;
2738
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002739 skb_push(skb, HCI_ACL_HDR_SIZE);
2740 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002741 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002742 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2743 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744}
2745
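/* Add ACL headers to a frame and all of its fragments and queue
 * them atomically. AMP controllers use the channel handle in
 * place of the connection handle.
 */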
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non-fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
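/* Pick the connection of the given type with the fewest packets in
 * flight (conn->sent) and grant it a fair share of the controller's
 * free buffers: with 8 free ACL slots and 3 ready connections, for
 * example, the quote is 8 / 3 = 2; a chosen connection always gets at
 * least 1.
 */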
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock the device here. Connections are always
	 * added and removed with the TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

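/* Transmit timeout: the controller has stopped acknowledging sent
 * packets, so forcibly disconnect every link of this type that still
 * has unacknowledged data in flight.
 */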
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

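/* Channel-level scheduler: scan all channels on connections of the
 * given link type and return the one whose head-of-queue skb has the
 * highest priority, preferring the connection with the fewest packets
 * in flight on a tie. The quote is computed as in hci_low_sent().
 */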
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

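/* Anti-starvation pass, run after a scheduling round that consumed
 * quota: any channel that sent nothing gets its head-of-queue skb
 * promoted to priority HCI_PRIO_MAX - 1 so low-priority traffic cannot
 * be starved indefinitely.
 */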
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

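/* Number of controller buffer blocks consumed by one ACL packet; the
 * 4-byte ACL header is not counted, so e.g. a 1004-byte skb (header
 * plus 1000 bytes of payload) with a block_len of 100 needs
 * DIV_ROUND_UP(1000, 100) = 10 blocks.
 */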
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

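/* ACL scheduler for packet-based flow control: drain the busiest
 * channels within their quotes, ending a channel's burst as soon as
 * the priority at the head of its queue drops, and recalculate
 * priorities if any buffers were consumed.
 */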
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

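/* ACL scheduler for block-based flow control: same structure as
 * hci_sched_acl_pkt(), but budgets are counted in buffer blocks rather
 * than packets, and on an AMP controller the data is scheduled on
 * AMP_LINK connections.
 */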
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

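/* LE scheduler: controllers with a dedicated LE buffer pool
 * (le_pkts != 0) are budgeted from le_cnt; otherwise LE traffic shares
 * the BR/EDR ACL pool (acl_cnt).
 */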
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send queued data to the HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

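/* A request is complete when the command queue is empty or when the
 * next queued command starts a new request.
 */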
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

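/* Requeue a clone of the last sent command (unless it was a reset) and
 * kick the command work so it gets retransmitted.
 */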
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

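/* Command scheduler: send at most one queued command whenever the
 * controller has credit (cmd_cnt), keep a clone in hdev->sent_cmd for
 * completion matching, and (re)arm the command timeout unless a reset
 * is in flight.
 */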
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

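/* Map an address type exposed by the management interface
 * (BDADDR_LE_*) to the ADDR_LE_DEV_* constants used internally for LE
 * connections.
 */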
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}