/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

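/* Pull the last received event skb out of hdev->recv_evt. When @event
 * is non-zero only that event code is accepted; otherwise the skb must
 * be a Command Complete event for @opcode. On any mismatch the skb is
 * freed and ERR_PTR(-ENODATA) is returned.
 */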
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

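/* Send a single HCI command and sleep until the controller answers
 * with the expected event (or, when @event is zero, a Command Complete
 * for @opcode). Returns the matching event skb on success and an
 * ERR_PTR on queueing failure, pending signal or timeout.
 */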
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

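/* Stage-two setup common to all BR/EDR capable controllers: read
 * buffer sizes, class of device, local name, voice setting and (on
 * 1.2+ controllers) page scan parameters, clear the event filters and
 * set the connection accept timeout.
 */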
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

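/* Choose the inquiry mode: 0x02 (extended inquiry result) or 0x01
 * (inquiry result with RSSI) when the feature bits advertise them,
 * plus a few controller revisions that handle RSSI inquiry without
 * advertising it. Anything else gets the standard mode 0x00.
 */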
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

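/* Stage-two init: BR/EDR and LE specific setup, the event mask, and
 * conditional commands (SSP, EIR, inquiry mode, extended features,
 * link-level security) based on what stage one discovered.
 */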
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

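/* Build the default link policy from the role switch, hold, sniff and
 * park features the controller supports and write it back.
 */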
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

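/* Stage-three init: commands that depend on the supported-commands
 * and feature bitmaps read during the earlier stages.
 */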
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Only send HCI_Delete_Stored_Link_Key if it is supported */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

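/* Run the staged controller init. All three stages are synchronous
 * requests; later stages rely on capabilities reported by stage one.
 */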
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

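/* Drop every entry from the inquiry cache and reset the unknown and
 * resolve lists. Callers hold the hdev lock.
 */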
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

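/* Re-insert @ie into the resolve list, keeping the list sorted by
 * decreasing signal strength (smallest abs(rssi) first) so the
 * strongest pending entries get their names resolved first.
 */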
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

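/* Add a fresh inquiry result to the cache or refresh an existing
 * entry. Returns true when the entry's remote name is already known,
 * false otherwise (including on allocation failure).
 */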
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

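/* Copy up to @num cached entries into @buf as struct inquiry_info
 * records and return the number actually copied.
 */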
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

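/* HCIINQUIRY ioctl handler: start a fresh inquiry if the cache is
 * stale or a flush was requested, wait for it to finish and copy the
 * cached results back to userspace.
 */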
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

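/* Assemble the LE advertising data (flags, TX power and the local
 * name, shortened if necessary) into @ptr and return its length.
 */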
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

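/* Power on a device: call the driver's open(), run any vendor setup
 * and the staged init unless the device is marked raw, and tear
 * everything back down if any step fails.
 */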
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non-BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

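/* HCISET* ioctl handler: each command either runs a synchronous HCI
 * request (auth, encrypt, scan, link policy) or simply updates the
 * corresponding field in struct hci_dev.
 */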
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

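/* HCIGETDEVLIST: copy a snapshot of (dev_id, flags) pairs for up to
 * dev_num registered controllers back to user space. As a side effect,
 * listing the devices cancels any pending auto-power-off and marks
 * controllers without a mgmt user as pairable.
 */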
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

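/* Decide whether a link key should be stored persistently. Legacy
 * (pre-SSP) keys are always kept, debug keys never are, and for Secure
 * Simple Pairing keys the answer depends on the bonding requirements
 * that the local and remote sides announced during pairing.
 */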
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

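/* Store (or update) a BR/EDR link key for bdaddr. An existing entry for
 * the same address is reused in place. When new_key is set, user space
 * is notified via mgmt_new_link_key(), and for keys that should not be
 * kept (per hci_persistent_key()) the connection is flagged so the key
 * is flushed once the link goes down.
 */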
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

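/* Cache the OOB hash and randomizer received out of band for bdaddr, so
 * they are available when Secure Simple Pairing with that device later
 * needs them. An existing cache entry for the address is overwritten.
 */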
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

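/* Add a remote address to the per-device blacklist. BDADDR_ANY is
 * rejected, duplicates return -EEXIST, and user space is informed
 * through the mgmt interface. Deleting BDADDR_ANY, by contrast, clears
 * the whole list (see hci_blacklist_del() below).
 */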
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

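/* Delayed work used to stop an ongoing LE scan: it queues a single
 * LE Set Scan Enable (disable) command and lets
 * le_scan_disable_work_complete() decide what to do next.
 */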
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

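/* Reassemble a (possibly fragmented) HCI packet arriving from the driver
 * in byte chunks. A per-index skb accumulates bytes until the header,
 * and then the payload length announced by that header, are complete;
 * the finished frame is handed to hci_recv_frame(). Returns the number
 * of input bytes left unconsumed, or a negative error.
 */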
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

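/* Feed driver data of a known packet type into the reassembly machinery.
 * The reassembly slot is picked from the packet type (type - 1), so ACL,
 * SCO and event packets can be reassembled independently.
 */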
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

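/* Hand one outgoing frame to the driver: time-stamp it, mirror a copy to
 * the monitor socket (and, in promiscuous mode, to the raw HCI sockets),
 * then pass ownership to the driver's send callback.
 */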
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

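/* Submit a built request: mark the last queued command with the
 * completion callback, splice the request's commands onto the device
 * command queue in one atomic step, and wake the command worker. An
 * error recorded while building the request aborts the whole batch.
 */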
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

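/* Queue an ACL frame on a channel. The ACL header carries the
 * connection handle for BR/EDR and the channel handle for AMP. A frame
 * with a frag_list is queued fragment by fragment under the queue lock,
 * with ACL_START on the first fragment and ACL_CONT on the rest.
 */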
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002693static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002694 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002696 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 struct hci_dev *hdev = conn->hdev;
2698 struct sk_buff *list;
2699
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002700 skb->len = skb_headlen(skb);
2701 skb->data_len = 0;
2702
2703 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002704
2705 switch (hdev->dev_type) {
2706 case HCI_BREDR:
2707 hci_add_acl_hdr(skb, conn->handle, flags);
2708 break;
2709 case HCI_AMP:
2710 hci_add_acl_hdr(skb, chan->handle, flags);
2711 break;
2712 default:
2713 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2714 return;
2715 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002716
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002717 list = skb_shinfo(skb)->frag_list;
2718 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719 /* Non fragmented */
2720 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2721
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002722 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002723 } else {
2724 /* Fragmented */
2725 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2726
2727 skb_shinfo(skb)->frag_list = NULL;
2728
2729 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002730 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002732 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002733
2734 flags &= ~ACL_START;
2735 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002736 do {
2737 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002738
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002740 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002741 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742
2743 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2744
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002745 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 } while (list);
2747
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002748 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002750}
2751
2752void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2753{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002754 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002755
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002756 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002757
2758 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002759
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002760 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002762 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
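/* Pick the connection of the given link type with the least amount of
 * data in flight and grant it a fair share of the free controller
 * buffers: e.g. with acl_cnt == 8 and num == 3 busy ACL connections,
 * the chosen connection gets a quote of 8 / 3 = 2 packets (minimum 1).
 */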
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

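/* A link type has stalled: tear down every connection of that type
 * that still has unacknowledged packets so its buffers are reclaimed.
 */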
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

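/* Channel-level scheduler: like hci_low_sent(), but walks the HCI
 * channels of every connection of the given type and only considers
 * channels whose head packet carries the highest priority seen so
 * far; among those, the channel of the connection with the least data
 * in flight wins.
 */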
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

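/* Anti-starvation pass, run after a scheduling round that sent data:
 * channels that transmitted get their sent counter cleared, while the
 * head packet of every channel that sent nothing is promoted to
 * HCI_PRIO_MAX - 1, so low priority queues cannot be starved
 * indefinitely by higher priority traffic.
 */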
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

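/* cnt is the number of free controller buffers (or blocks): if it has
 * dropped to zero and nothing has been transmitted for longer than
 * HCI_ACL_TX_TIMEOUT, assume the ACL link has stalled and kill the
 * offending connections.
 */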
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

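/* Block-based ACL scheduling (HCI_FLOW_CTL_MODE_BLOCK_BASED): buffer
 * usage is accounted in fixed size blocks rather than whole packets.
 * __get_blocks() above rounds up, so e.g. a 1028 byte frame (1024
 * bytes of payload after the 4 byte ACL header) on a controller with
 * block_len == 256 consumes 1024 / 256 = 4 blocks.
 */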
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

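/* Schedule LE data. Controllers without a dedicated LE buffer pool
 * report le_pkts == 0 and share the ACL buffers instead, which is why
 * the remaining count is written back to either le_cnt or acl_cnt at
 * the end.
 */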
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

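/* HCI request framework helpers. Commands that open a new request are
 * marked with req.start, so the current request is complete once the
 * head of the command queue carries that mark (or the queue is empty).
 */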
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent, we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

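/* RX work: drain hdev->rx_q, copy each frame to the monitor and (in
 * promiscuous mode) to raw sockets, then dispatch it by packet type.
 */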
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

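/* Command work: send the next queued command when the controller has
 * a free command credit. A clone of the sent command is kept in
 * hdev->sent_cmd for the request machinery above, and cmd_timer acts
 * as a watchdog for the command response.
 */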
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

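/* Start a general inquiry. The lap[] initializer below is the General
 * Inquiry Access Code (GIAC, 0x9e8b33) in little-endian byte order;
 * length is passed straight through to the HCI Inquiry command, which
 * expresses the inquiry duration in units of 1.28 s.
 */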
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	hci_inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

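/* Map an exported bdaddr type (BDADDR_LE_*) to the internal
 * ADDR_LE_DEV_* representation, defaulting to the random address type.
 */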
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}