/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
Sasha Levin3df92b32012-05-27 22:36:56 +020048/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
Marcel Holtmann65164552005-10-28 19:20:48 +020053static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070054{
Marcel Holtmann040030e2012-02-20 14:50:37 +010055 hci_sock_dev_event(hdev, event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070056}
57
58/* ---- HCI requests ---- */
59
Johan Hedberg42c6b122013-03-05 20:37:49 +020060static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070061{
Johan Hedberg42c6b122013-03-05 20:37:49 +020062 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
Fengguang Wu77a63e02013-04-20 16:24:31 +030082static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +030084{
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
87 struct sk_buff *skb;
88
89 hci_dev_lock(hdev);
90
91 skb = hdev->recv_evt;
92 hdev->recv_evt = NULL;
93
94 hci_dev_unlock(hdev);
95
96 if (!skb)
97 return ERR_PTR(-ENODATA);
98
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
101 goto failed;
102 }
103
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
106
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300107 if (event) {
108 if (hdr->evt != event)
109 goto failed;
110 return skb;
111 }
112
Johan Hedberg75e84b72013-04-02 13:35:04 +0300113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 goto failed;
116 }
117
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
120 goto failed;
121 }
122
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
125
126 if (opcode == __le16_to_cpu(ev->opcode))
127 return skb;
128
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
131
132failed:
133 kfree_skb(skb);
134 return ERR_PTR(-ENODATA);
135}
136
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300137struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300138 const void *param, u8 event, u32 timeout)
Johan Hedberg75e84b72013-04-02 13:35:04 +0300139{
140 DECLARE_WAITQUEUE(wait, current);
141 struct hci_request req;
142 int err = 0;
143
144 BT_DBG("%s", hdev->name);
145
146 hci_req_init(&req, hdev);
147
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300148 hci_req_add_ev(&req, opcode, plen, param, event);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300149
150 hdev->req_status = HCI_REQ_PEND;
151
152 err = hci_req_run(&req, hci_req_sync_complete);
153 if (err < 0)
154 return ERR_PTR(err);
155
156 add_wait_queue(&hdev->req_wait_q, &wait);
157 set_current_state(TASK_INTERRUPTIBLE);
158
159 schedule_timeout(timeout);
160
161 remove_wait_queue(&hdev->req_wait_q, &wait);
162
163 if (signal_pending(current))
164 return ERR_PTR(-EINTR);
165
166 switch (hdev->req_status) {
167 case HCI_REQ_DONE:
168 err = -bt_to_errno(hdev->req_result);
169 break;
170
171 case HCI_REQ_CANCELED:
172 err = -hdev->req_result;
173 break;
174
175 default:
176 err = -ETIMEDOUT;
177 break;
178 }
179
180 hdev->req_status = hdev->req_result = 0;
181
182 BT_DBG("%s end: err %d", hdev->name, err);
183
184 if (err < 0)
185 return ERR_PTR(err);
186
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300187 return hci_get_cmd_complete(hdev, opcode, event);
188}
189EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
191struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
Johan Hedberg07dc93d2013-04-19 10:14:51 +0300192 const void *param, u32 timeout)
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300193{
194 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
Johan Hedberg75e84b72013-04-02 13:35:04 +0300195}
196EXPORT_SYMBOL(__hci_cmd_sync);
197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198/* Execute request and wait for completion. */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200199static int __hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200200 void (*func)(struct hci_request *req,
201 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200202 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200204 struct hci_request req;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205 DECLARE_WAITQUEUE(wait, current);
206 int err = 0;
207
208 BT_DBG("%s start", hdev->name);
209
Johan Hedberg42c6b122013-03-05 20:37:49 +0200210 hci_req_init(&req, hdev);
211
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212 hdev->req_status = HCI_REQ_PEND;
213
Johan Hedberg42c6b122013-03-05 20:37:49 +0200214 func(&req, opt);
Johan Hedberg53cce222013-03-05 20:37:42 +0200215
Johan Hedberg42c6b122013-03-05 20:37:49 +0200216 err = hci_req_run(&req, hci_req_sync_complete);
217 if (err < 0) {
Johan Hedberg53cce222013-03-05 20:37:42 +0200218 hdev->req_status = 0;
Andre Guedes920c8302013-03-08 11:20:15 -0300219
220 /* ENODATA means the HCI request command queue is empty.
221 * This can happen when a request with conditionals doesn't
222 * trigger any commands to be sent. This is normal behavior
223 * and should not trigger an error return.
Johan Hedberg42c6b122013-03-05 20:37:49 +0200224 */
Andre Guedes920c8302013-03-08 11:20:15 -0300225 if (err == -ENODATA)
226 return 0;
227
228 return err;
Johan Hedberg53cce222013-03-05 20:37:42 +0200229 }
230
Andre Guedesbc4445c2013-03-08 11:20:13 -0300231 add_wait_queue(&hdev->req_wait_q, &wait);
232 set_current_state(TASK_INTERRUPTIBLE);
233
Linus Torvalds1da177e2005-04-16 15:20:36 -0700234 schedule_timeout(timeout);
235
236 remove_wait_queue(&hdev->req_wait_q, &wait);
237
238 if (signal_pending(current))
239 return -EINTR;
240
241 switch (hdev->req_status) {
242 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700243 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244 break;
245
246 case HCI_REQ_CANCELED:
247 err = -hdev->req_result;
248 break;
249
250 default:
251 err = -ETIMEDOUT;
252 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700253 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700254
Johan Hedberga5040ef2011-01-10 13:28:59 +0200255 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700256
257 BT_DBG("%s end: err %d", hdev->name, err);
258
259 return err;
260}
261
Johan Hedberg01178cd2013-03-05 20:37:41 +0200262static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200263 void (*req)(struct hci_request *req,
264 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200265 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700266{
267 int ret;
268
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200269 if (!test_bit(HCI_UP, &hdev->flags))
270 return -ENETDOWN;
271
Linus Torvalds1da177e2005-04-16 15:20:36 -0700272 /* Serialize all requests */
273 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200274 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275 hci_req_unlock(hdev);
276
277 return ret;
278}
279
Johan Hedberg42c6b122013-03-05 20:37:49 +0200280static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200282 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283
284 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287}
288
Johan Hedberg42c6b122013-03-05 20:37:49 +0200289static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200292
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200296 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200298
299 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301}
302
Johan Hedberg42c6b122013-03-05 20:37:49 +0200303static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200304{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200306
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200307 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300309
310 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300312
313 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200315}
316
Johan Hedberg42c6b122013-03-05 20:37:49 +0200317static void hci_init1_req(struct hci_request *req, unsigned long opt)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200318{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200319 struct hci_dev *hdev = req->hdev;
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200320
321 BT_DBG("%s %ld", hdev->name, opt);
322
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300323 /* Reset */
324 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200325 hci_reset_req(req, 0);
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300326
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200327 switch (hdev->dev_type) {
328 case HCI_BREDR:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200329 bredr_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200330 break;
331
332 case HCI_AMP:
Johan Hedberg42c6b122013-03-05 20:37:49 +0200333 amp_init(req);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200334 break;
335
336 default:
337 BT_ERR("Unknown device type %d", hdev->dev_type);
338 break;
339 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200340}
341
Johan Hedberg42c6b122013-03-05 20:37:49 +0200342static void bredr_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200343{
Johan Hedberg2177bab2013-03-05 20:37:43 +0200344 __le16 param;
345 __u8 flt_type;
346
347 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200348 hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200349
350 /* Read Class of Device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200351 hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200352
353 /* Read Local Name */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200354 hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200355
356 /* Read Voice Setting */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200357 hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200358
359 /* Clear Event Filters */
360 flt_type = HCI_FLT_CLEAR_ALL;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200361 hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200362
363 /* Connection accept timeout ~20 secs */
364 param = __constant_cpu_to_le16(0x7d00);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200365 hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200366
Johan Hedbergf332ec62013-03-15 17:07:11 -0500367 /* Read page scan parameters */
368 if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
369 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
370 hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
371 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200372}
373
Johan Hedberg42c6b122013-03-05 20:37:49 +0200374static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200375{
Johan Hedbergc73eee92013-04-19 18:35:21 +0300376 struct hci_dev *hdev = req->hdev;
377
Johan Hedberg2177bab2013-03-05 20:37:43 +0200378 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200379 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200380
381 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200382 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200383
384 /* Read LE Advertising Channel TX Power */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200385 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200386
387 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200388 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200389
390 /* Read LE Supported States */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200391 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
Johan Hedbergc73eee92013-04-19 18:35:21 +0300392
393 /* LE-only controllers have LE implicitly enabled */
394 if (!lmp_bredr_capable(hdev))
395 set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200396}
397
398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399{
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
402
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
405
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
409
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
417 }
418
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
422
423 return 0x00;
424}
425
Johan Hedberg42c6b122013-03-05 20:37:49 +0200426static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200427{
428 u8 mode;
429
Johan Hedberg42c6b122013-03-05 20:37:49 +0200430 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200431
Johan Hedberg42c6b122013-03-05 20:37:49 +0200432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200433}
434
Johan Hedberg42c6b122013-03-05 20:37:49 +0200435static void hci_setup_event_mask(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200436{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200437 struct hci_dev *hdev = req->hdev;
438
Johan Hedberg2177bab2013-03-05 20:37:43 +0200439 /* The second byte is 0xff instead of 0x9f (two reserved bits
440 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
441 * command otherwise.
442 */
443 u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };
444
445 /* CSR 1.1 dongles does not accept any bitfield so don't try to set
446 * any event mask for pre 1.2 devices.
447 */
448 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
449 return;
450
451 if (lmp_bredr_capable(hdev)) {
452 events[4] |= 0x01; /* Flow Specification Complete */
453 events[4] |= 0x02; /* Inquiry Result with RSSI */
454 events[4] |= 0x04; /* Read Remote Extended Features Complete */
455 events[5] |= 0x08; /* Synchronous Connection Complete */
456 events[5] |= 0x10; /* Synchronous Connection Changed */
Marcel Holtmannc7882cb2013-08-13 10:00:54 -0700457 } else {
458 /* Use a different default for LE-only devices */
459 memset(events, 0, sizeof(events));
460 events[0] |= 0x10; /* Disconnection Complete */
461 events[0] |= 0x80; /* Encryption Change */
462 events[1] |= 0x08; /* Read Remote Version Information Complete */
463 events[1] |= 0x20; /* Command Complete */
464 events[1] |= 0x40; /* Command Status */
465 events[1] |= 0x80; /* Hardware Error */
466 events[2] |= 0x04; /* Number of Completed Packets */
467 events[3] |= 0x02; /* Data Buffer Overflow */
468 events[5] |= 0x80; /* Encryption Key Refresh Complete */
Johan Hedberg2177bab2013-03-05 20:37:43 +0200469 }
470
471 if (lmp_inq_rssi_capable(hdev))
472 events[4] |= 0x02; /* Inquiry Result with RSSI */
473
474 if (lmp_sniffsubr_capable(hdev))
475 events[5] |= 0x20; /* Sniff Subrating */
476
477 if (lmp_pause_enc_capable(hdev))
478 events[5] |= 0x80; /* Encryption Key Refresh Complete */
479
480 if (lmp_ext_inq_capable(hdev))
481 events[5] |= 0x40; /* Extended Inquiry Result */
482
483 if (lmp_no_flush_capable(hdev))
484 events[7] |= 0x01; /* Enhanced Flush Complete */
485
486 if (lmp_lsto_capable(hdev))
487 events[6] |= 0x80; /* Link Supervision Timeout Changed */
488
489 if (lmp_ssp_capable(hdev)) {
490 events[6] |= 0x01; /* IO Capability Request */
491 events[6] |= 0x02; /* IO Capability Response */
492 events[6] |= 0x04; /* User Confirmation Request */
493 events[6] |= 0x08; /* User Passkey Request */
494 events[6] |= 0x10; /* Remote OOB Data Request */
495 events[6] |= 0x20; /* Simple Pairing Complete */
496 events[7] |= 0x04; /* User Passkey Notification */
497 events[7] |= 0x08; /* Keypress Notification */
498 events[7] |= 0x10; /* Remote Host Supported
499 * Features Notification
500 */
501 }
502
503 if (lmp_le_capable(hdev))
504 events[7] |= 0x20; /* LE Meta-Event */
505
Johan Hedberg42c6b122013-03-05 20:37:49 +0200506 hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200507
508 if (lmp_le_capable(hdev)) {
509 memset(events, 0, sizeof(events));
510 events[0] = 0x1f;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200511 hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
512 sizeof(events), events);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200513 }
514}
515
Johan Hedberg42c6b122013-03-05 20:37:49 +0200516static void hci_init2_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200517{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200518 struct hci_dev *hdev = req->hdev;
519
Johan Hedberg2177bab2013-03-05 20:37:43 +0200520 if (lmp_bredr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200521 bredr_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200522
523 if (lmp_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200524 le_setup(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200525
Johan Hedberg42c6b122013-03-05 20:37:49 +0200526 hci_setup_event_mask(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200527
Johan Hedberg3f8e2d72013-07-24 02:32:46 +0300528 /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
529 * local supported commands HCI command.
530 */
531 if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200532 hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200533
534 if (lmp_ssp_capable(hdev)) {
535 if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
536 u8 mode = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200537 hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
538 sizeof(mode), &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200539 } else {
540 struct hci_cp_write_eir cp;
541
542 memset(hdev->eir, 0, sizeof(hdev->eir));
543 memset(&cp, 0, sizeof(cp));
544
Johan Hedberg42c6b122013-03-05 20:37:49 +0200545 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200546 }
547 }
548
549 if (lmp_inq_rssi_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200550 hci_setup_inquiry_mode(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200551
552 if (lmp_inq_tx_pwr_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200553 hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200554
555 if (lmp_ext_feat_capable(hdev)) {
556 struct hci_cp_read_local_ext_features cp;
557
558 cp.page = 0x01;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200559 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
560 sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200561 }
562
563 if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
564 u8 enable = 1;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200565 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
566 &enable);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200567 }
568}
569
Johan Hedberg42c6b122013-03-05 20:37:49 +0200570static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200571{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200572 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200573 struct hci_cp_write_def_link_policy cp;
574 u16 link_policy = 0;
575
576 if (lmp_rswitch_capable(hdev))
577 link_policy |= HCI_LP_RSWITCH;
578 if (lmp_hold_capable(hdev))
579 link_policy |= HCI_LP_HOLD;
580 if (lmp_sniff_capable(hdev))
581 link_policy |= HCI_LP_SNIFF;
582 if (lmp_park_capable(hdev))
583 link_policy |= HCI_LP_PARK;
584
585 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200586 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200587}
588
Johan Hedberg42c6b122013-03-05 20:37:49 +0200589static void hci_set_le_support(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200590{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200591 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200592 struct hci_cp_write_le_host_supported cp;
593
Johan Hedbergc73eee92013-04-19 18:35:21 +0300594 /* LE-only devices do not support explicit enablement */
595 if (!lmp_bredr_capable(hdev))
596 return;
597
Johan Hedberg2177bab2013-03-05 20:37:43 +0200598 memset(&cp, 0, sizeof(cp));
599
600 if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
601 cp.le = 0x01;
602 cp.simul = lmp_le_br_capable(hdev);
603 }
604
605 if (cp.le != lmp_host_le_capable(hdev))
Johan Hedberg42c6b122013-03-05 20:37:49 +0200606 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
607 &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200608}
609
Johan Hedberg42c6b122013-03-05 20:37:49 +0200610static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200611{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200612 struct hci_dev *hdev = req->hdev;
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300613 u8 p;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200614
Gustavo Padovanb8f4e062013-06-13 12:34:31 +0100615 /* Some Broadcom based Bluetooth controllers do not support the
616 * Delete Stored Link Key command. They are clearly indicating its
617 * absence in the bit mask of supported commands.
618 *
619 * Check the supported commands and only if the the command is marked
620 * as supported send it. If not supported assume that the controller
621 * does not have actual support for stored link keys which makes this
622 * command redundant anyway.
Marcel Holtmann637b4ca2013-07-01 14:14:46 -0700623 */
Johan Hedberg59f45d52013-06-13 11:01:13 +0300624 if (hdev->commands[6] & 0x80) {
625 struct hci_cp_delete_stored_link_key cp;
626
627 bacpy(&cp.bdaddr, BDADDR_ANY);
628 cp.delete_all = 0x01;
629 hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
630 sizeof(cp), &cp);
631 }
632
Johan Hedberg2177bab2013-03-05 20:37:43 +0200633 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200634 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200635
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500636 if (lmp_le_capable(hdev)) {
Johan Hedberg42c6b122013-03-05 20:37:49 +0200637 hci_set_le_support(req);
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500638 hci_update_ad(req);
639 }
Johan Hedbergd2c5d772013-04-17 15:00:52 +0300640
641 /* Read features beyond page 1 if available */
642 for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
643 struct hci_cp_read_local_ext_features cp;
644
645 cp.page = p;
646 hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
647 sizeof(cp), &cp);
648 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200649}
650
651static int __hci_init(struct hci_dev *hdev)
652{
653 int err;
654
655 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
656 if (err < 0)
657 return err;
658
659 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
660 * BR/EDR/LE type controllers. AMP controllers only need the
661 * first stage init.
662 */
663 if (hdev->dev_type != HCI_BREDR)
664 return 0;
665
666 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
667 if (err < 0)
668 return err;
669
670 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
671}
672
Johan Hedberg42c6b122013-03-05 20:37:49 +0200673static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700674{
675 __u8 scan = opt;
676
Johan Hedberg42c6b122013-03-05 20:37:49 +0200677 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678
679 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200680 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700681}
682
Johan Hedberg42c6b122013-03-05 20:37:49 +0200683static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700684{
685 __u8 auth = opt;
686
Johan Hedberg42c6b122013-03-05 20:37:49 +0200687 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688
689 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200690 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691}
692
Johan Hedberg42c6b122013-03-05 20:37:49 +0200693static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694{
695 __u8 encrypt = opt;
696
Johan Hedberg42c6b122013-03-05 20:37:49 +0200697 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700698
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200699 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200700 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700701}
702
Johan Hedberg42c6b122013-03-05 20:37:49 +0200703static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200704{
705 __le16 policy = cpu_to_le16(opt);
706
Johan Hedberg42c6b122013-03-05 20:37:49 +0200707 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200708
709 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200710 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200711}
712
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900713/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700714 * Device is held on return. */
715struct hci_dev *hci_dev_get(int index)
716{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200717 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718
719 BT_DBG("%d", index);
720
721 if (index < 0)
722 return NULL;
723
724 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200725 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700726 if (d->id == index) {
727 hdev = hci_dev_hold(d);
728 break;
729 }
730 }
731 read_unlock(&hci_dev_list_lock);
732 return hdev;
733}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700734
735/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200736
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200737bool hci_discovery_active(struct hci_dev *hdev)
738{
739 struct discovery_state *discov = &hdev->discovery;
740
Andre Guedes6fbe1952012-02-03 17:47:58 -0300741 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300742 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300743 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200744 return true;
745
Andre Guedes6fbe1952012-02-03 17:47:58 -0300746 default:
747 return false;
748 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200749}
750
Johan Hedbergff9ef572012-01-04 14:23:45 +0200751void hci_discovery_set_state(struct hci_dev *hdev, int state)
752{
753 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
754
755 if (hdev->discovery.state == state)
756 return;
757
758 switch (state) {
759 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300760 if (hdev->discovery.state != DISCOVERY_STARTING)
761 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200762 break;
763 case DISCOVERY_STARTING:
764 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300765 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200766 mgmt_discovering(hdev, 1);
767 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200768 case DISCOVERY_RESOLVING:
769 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200770 case DISCOVERY_STOPPING:
771 break;
772 }
773
774 hdev->discovery.state = state;
775}
776
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300777void hci_inquiry_cache_flush(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778{
Johan Hedberg30883512012-01-04 14:16:21 +0200779 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200780 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781
Johan Hedberg561aafb2012-01-04 13:31:59 +0200782 list_for_each_entry_safe(p, n, &cache->all, all) {
783 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200784 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200786
787 INIT_LIST_HEAD(&cache->unknown);
788 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789}
790
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300791struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
792 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700793{
Johan Hedberg30883512012-01-04 14:16:21 +0200794 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 struct inquiry_entry *e;
796
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300797 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798
Johan Hedberg561aafb2012-01-04 13:31:59 +0200799 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700800 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200801 return e;
802 }
803
804 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700805}
806
Johan Hedberg561aafb2012-01-04 13:31:59 +0200807struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300808 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200809{
Johan Hedberg30883512012-01-04 14:16:21 +0200810 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200811 struct inquiry_entry *e;
812
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300813 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200814
815 list_for_each_entry(e, &cache->unknown, list) {
816 if (!bacmp(&e->data.bdaddr, bdaddr))
817 return e;
818 }
819
820 return NULL;
821}
822
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200823struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300824 bdaddr_t *bdaddr,
825 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200826{
827 struct discovery_state *cache = &hdev->discovery;
828 struct inquiry_entry *e;
829
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300830 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200831
832 list_for_each_entry(e, &cache->resolve, list) {
833 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
834 return e;
835 if (!bacmp(&e->data.bdaddr, bdaddr))
836 return e;
837 }
838
839 return NULL;
840}
841
Johan Hedberga3d4e202012-01-09 00:53:02 +0200842void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300843 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200844{
845 struct discovery_state *cache = &hdev->discovery;
846 struct list_head *pos = &cache->resolve;
847 struct inquiry_entry *p;
848
849 list_del(&ie->list);
850
851 list_for_each_entry(p, &cache->resolve, list) {
852 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300853 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200854 break;
855 pos = &p->list;
856 }
857
858 list_add(&ie->list, pos);
859}
860
Johan Hedberg31754052012-01-04 13:39:52 +0200861bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300862 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863{
Johan Hedberg30883512012-01-04 14:16:21 +0200864 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200865 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300867 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700868
Szymon Janc2b2fec42012-11-20 11:38:54 +0100869 hci_remove_remote_oob_data(hdev, &data->bdaddr);
870
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200871 if (ssp)
872 *ssp = data->ssp_mode;
873
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200874 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200875 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200876 if (ie->data.ssp_mode && ssp)
877 *ssp = true;
878
Johan Hedberga3d4e202012-01-09 00:53:02 +0200879 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300880 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +0200881 ie->data.rssi = data->rssi;
882 hci_inquiry_cache_update_resolve(hdev, ie);
883 }
884
Johan Hedberg561aafb2012-01-04 13:31:59 +0200885 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200886 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200887
Johan Hedberg561aafb2012-01-04 13:31:59 +0200888 /* Entry not in the cache. Add new one. */
889 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
890 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200891 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200892
893 list_add(&ie->all, &cache->all);
894
895 if (name_known) {
896 ie->name_state = NAME_KNOWN;
897 } else {
898 ie->name_state = NAME_NOT_KNOWN;
899 list_add(&ie->list, &cache->unknown);
900 }
901
902update:
903 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300904 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +0200905 ie->name_state = NAME_KNOWN;
906 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700907 }
908
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200909 memcpy(&ie->data, data, sizeof(*data));
910 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200912
913 if (ie->name_state == NAME_NOT_KNOWN)
914 return false;
915
916 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917}
918
919static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
920{
Johan Hedberg30883512012-01-04 14:16:21 +0200921 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700922 struct inquiry_info *info = (struct inquiry_info *) buf;
923 struct inquiry_entry *e;
924 int copied = 0;
925
Johan Hedberg561aafb2012-01-04 13:31:59 +0200926 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200928
929 if (copied >= num)
930 break;
931
Linus Torvalds1da177e2005-04-16 15:20:36 -0700932 bacpy(&info->bdaddr, &data->bdaddr);
933 info->pscan_rep_mode = data->pscan_rep_mode;
934 info->pscan_period_mode = data->pscan_period_mode;
935 info->pscan_mode = data->pscan_mode;
936 memcpy(info->dev_class, data->dev_class, 3);
937 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200938
Linus Torvalds1da177e2005-04-16 15:20:36 -0700939 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200940 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700941 }
942
943 BT_DBG("cache %p, copied %d", cache, copied);
944 return copied;
945}
946
Johan Hedberg42c6b122013-03-05 20:37:49 +0200947static void hci_inq_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948{
949 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200950 struct hci_dev *hdev = req->hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951 struct hci_cp_inquiry cp;
952
953 BT_DBG("%s", hdev->name);
954
955 if (test_bit(HCI_INQUIRY, &hdev->flags))
956 return;
957
958 /* Start Inquiry */
959 memcpy(&cp.lap, &ir->lap, 3);
960 cp.length = ir->length;
961 cp.num_rsp = ir->num_rsp;
Johan Hedberg42c6b122013-03-05 20:37:49 +0200962 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700963}
964
/* Bit-wait action for wait_on_bit(): yield the CPU and report
 * whether the sleep was interrupted by a pending signal (non-zero
 * makes wait_on_bit() abort the wait).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
970
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971int hci_inquiry(void __user *arg)
972{
973 __u8 __user *ptr = arg;
974 struct hci_inquiry_req ir;
975 struct hci_dev *hdev;
976 int err = 0, do_inquiry = 0, max_rsp;
977 long timeo;
978 __u8 *buf;
979
980 if (copy_from_user(&ir, ptr, sizeof(ir)))
981 return -EFAULT;
982
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200983 hdev = hci_dev_get(ir.dev_id);
984 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985 return -ENODEV;
986
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300987 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900988 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300989 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Andre Guedes1f9b9a52013-04-30 15:29:27 -0300990 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991 do_inquiry = 1;
992 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300993 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994
Marcel Holtmann04837f62006-07-03 10:02:33 +0200995 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200996
997 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +0200998 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
999 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001000 if (err < 0)
1001 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -03001002
1003 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
1004 * cleared). If it is interrupted by a signal, return -EINTR.
1005 */
1006 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
1007 TASK_INTERRUPTIBLE))
1008 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001009 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001010
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03001011 /* for unlimited number of responses we will use buffer with
1012 * 255 entries
1013 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
1015
1016 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
1017 * copy it to the user space.
1018 */
Szymon Janc01df8c32011-02-17 16:46:47 +01001019 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001020 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001021 err = -ENOMEM;
1022 goto done;
1023 }
1024
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001025 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001027 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001028
1029 BT_DBG("num_rsp %d", ir.num_rsp);
1030
1031 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1032 ptr += sizeof(ir);
1033 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001034 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001036 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001037 err = -EFAULT;
1038
1039 kfree(buf);
1040
1041done:
1042 hci_dev_put(hdev);
1043 return err;
1044}
1045
/* Build LE advertising data into @ptr (at most HCI_MAX_AD_LENGTH
 * bytes): an optional flags field, an optional TX power field, and
 * the local name (shortened when it does not fit). Each element uses
 * the AD structure layout: length byte, type byte, payload.
 * Returns the total number of bytes written.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	/* Emit the flags element only when at least one flag is set */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Reserve two bytes for this element's length/type pair */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* Truncated name uses the "shortened local name" type */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Length byte covers the type byte plus the name bytes */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
1103
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001104void hci_update_ad(struct hci_request *req)
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001105{
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001106 struct hci_dev *hdev = req->hdev;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001107 struct hci_cp_le_set_adv_data cp;
1108 u8 len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001109
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001110 if (!lmp_le_capable(hdev))
1111 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001112
1113 memset(&cp, 0, sizeof(cp));
1114
1115 len = create_ad(hdev, cp.data);
1116
1117 if (hdev->adv_data_len == len &&
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001118 memcmp(cp.data, hdev->adv_data, len) == 0)
1119 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001120
1121 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1122 hdev->adv_data_len = len;
1123
1124 cp.length = len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001125
Johan Hedberg04b4edc2013-03-15 17:07:01 -05001126 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
Johan Hedberg3f0f5242012-11-08 01:23:00 +01001127}
1128
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129/* ---- HCI ioctl helpers ---- */
1130
/* Power on an HCI device: open the transport, run driver setup and
 * the HCI init sequence (unless the controller is raw), then announce
 * the device as up/powered. Returns 0 on success or a negative errno;
 * on init failure all queues and works are flushed and the transport
 * is closed again.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power up a device that is being unregistered */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Respect the rfkill switch, if one is registered */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	/* Open the underlying transport driver */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Allow one outstanding command while HCI_INIT is set */
	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup only runs during initial setup phase */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices skip the HCI initialization sequence */
		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Report powered state to mgmt once setup has finished */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		/* Drop the command left in flight, if any */
		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1222
/* Common power-down path (used by hci_dev_close(), rfkill and the
 * delayed power_off work): stop pending work, flush the inquiry cache
 * and connections, optionally reset the controller, and close the
 * transport. Returns 0, also when the device was already down.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* A close in progress supersedes any scheduled power-off */
	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Going down cancels a pending discoverable timeout */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* Drop the last received event buffer */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Auto-off shutdowns already reported powered-off via mgmt */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1317
1318int hci_dev_close(__u16 dev)
1319{
1320 struct hci_dev *hdev;
1321 int err;
1322
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001323 hdev = hci_dev_get(dev);
1324 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001325 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001326
1327 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1328 cancel_delayed_work(&hdev->power_off);
1329
Linus Torvalds1da177e2005-04-16 15:20:36 -07001330 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001331
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 hci_dev_put(hdev);
1333 return err;
1334}
1335
/* ioctl(HCIDEVRESET): drop queued packets, the inquiry cache and all
 * connections, then (for non-raw devices) issue an HCI Reset. A
 * device that is not up is left untouched and 0 is returned.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset the command count and per-link-type packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1373
1374int hci_dev_reset_stat(__u16 dev)
1375{
1376 struct hci_dev *hdev;
1377 int ret = 0;
1378
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001379 hdev = hci_dev_get(dev);
1380 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381 return -ENODEV;
1382
1383 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1384
1385 hci_dev_put(hdev);
1386
1387 return ret;
1388}
1389
/* Handle the HCISET* ioctls: run the matching synchronous HCI
 * request (auth, encrypt, scan, link policy) or update locally
 * cached settings (link mode, packet types, ACL/SCO MTU). Returns 0
 * or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* dev_opt packs the MTU and packet count into its two 16-bit
	 * halves (second halfword = MTU, first = packet count).
	 */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1464
/* ioctl(HCIGETDEVLIST): copy (dev_id, flags) pairs for up to dev_num
 * registered devices to user space. As a side effect of legacy raw
 * ioctl access, pending auto-off work is cancelled and non-mgmt
 * devices are marked pairable.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to roughly two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1511
/* ioctl(HCIGETDEVINFO): fill struct hci_dev_info for one device and
 * copy it to user space. On LE-only controllers the ACL fields are
 * reused to report the LE buffer settings.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl access cancels auto-off and allows pairing on
	 * devices not managed through the mgmt interface.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack transport bus (low nibble) and device type (high) */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1560
1561/* ---- Interface to HCI drivers ---- */
1562
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001563static int hci_rfkill_set_block(void *data, bool blocked)
1564{
1565 struct hci_dev *hdev = data;
1566
1567 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1568
1569 if (!blocked)
1570 return 0;
1571
1572 hci_dev_do_close(hdev);
1573
1574 return 0;
1575}
1576
/* rfkill hooks for a controller; only blocking triggers an action
 * (see hci_rfkill_set_block()) */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1580
/* Worker that powers a controller on (queued e.g. from
 * hci_register_dev()).  A failed open is reported to mgmt clients.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* Auto-powered devices get scheduled to turn back off after the
	 * auto-off timeout */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	/* First successful power-on ends the setup phase: announce the
	 * new controller index to mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1601
/* Delayed worker that powers a controller off (scheduled e.g. by
 * hci_power_on() via the auto-off timeout).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1611
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001612static void hci_discov_off(struct work_struct *work)
1613{
1614 struct hci_dev *hdev;
1615 u8 scan = SCAN_PAGE;
1616
1617 hdev = container_of(work, struct hci_dev, discov_off.work);
1618
1619 BT_DBG("%s", hdev->name);
1620
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001621 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001622
1623 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1624
1625 hdev->discov_timeout = 0;
1626
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001627 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001628}
1629
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001630int hci_uuids_clear(struct hci_dev *hdev)
1631{
Johan Hedberg48210022013-01-27 00:31:28 +02001632 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001633
Johan Hedberg48210022013-01-27 00:31:28 +02001634 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1635 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001636 kfree(uuid);
1637 }
1638
1639 return 0;
1640}
1641
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001642int hci_link_keys_clear(struct hci_dev *hdev)
1643{
1644 struct list_head *p, *n;
1645
1646 list_for_each_safe(p, n, &hdev->link_keys) {
1647 struct link_key *key;
1648
1649 key = list_entry(p, struct link_key, list);
1650
1651 list_del(p);
1652 kfree(key);
1653 }
1654
1655 return 0;
1656}
1657
/* Free all stored SMP long term keys (STKs and LTKs).  Always returns 0. */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1669
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001670struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1671{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001672 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001673
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001674 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001675 if (bacmp(bdaddr, &k->bdaddr) == 0)
1676 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001677
1678 return NULL;
1679}
1680
/* Decide whether a link key should be stored persistently (survive the
 * connection) based on its type and the bonding requirements of both
 * sides.  @conn may be NULL.  Returns true to keep the key, false to
 * have it flushed when the connection ends.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one
	 * (0xff marks "no previous key") */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1716
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001717struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001718{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001719 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001720
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001721 list_for_each_entry(k, &hdev->long_term_keys, list) {
1722 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001723 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001724 continue;
1725
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001726 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001727 }
1728
1729 return NULL;
1730}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001731
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001732struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001733 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001734{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001735 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001736
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001737 list_for_each_entry(k, &hdev->long_term_keys, list)
1738 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001739 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001740 return k;
1741
1742 return NULL;
1743}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001744
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL (e.g. security mode 3 key notifications).  When
 * @new_key is set, mgmt is notified and the key's persistence decides
 * whether the connection should flush it on disconnect.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address when there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key inherits the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys get flushed when the connection ends */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1797
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type.
 *
 * Key types that are neither STK nor LTK are silently ignored.  When
 * @new_key is set and the key is an LTK, mgmt is notified.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address when there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only proper LTKs are reported to userspace, not STKs */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1834
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001835int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1836{
1837 struct link_key *key;
1838
1839 key = hci_find_link_key(hdev, bdaddr);
1840 if (!key)
1841 return -ENOENT;
1842
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001843 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001844
1845 list_del(&key->list);
1846 kfree(key);
1847
1848 return 0;
1849}
1850
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001851int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1852{
1853 struct smp_ltk *k, *tmp;
1854
1855 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1856 if (bacmp(bdaddr, &k->bdaddr))
1857 continue;
1858
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001859 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001860
1861 list_del(&k->list);
1862 kfree(k);
1863 }
1864
1865 return 0;
1866}
1867
/* HCI command timer function: fires when the controller has not answered
 * a sent command in time.  Logs the stuck opcode (when one is in flight),
 * then restores the single command credit and kicks the command worker so
 * queued commands can proceed.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1885
Szymon Janc2763eda2011-03-22 13:12:22 +01001886struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001887 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001888{
1889 struct oob_data *data;
1890
1891 list_for_each_entry(data, &hdev->remote_oob_data, list)
1892 if (bacmp(bdaddr, &data->bdaddr) == 0)
1893 return data;
1894
1895 return NULL;
1896}
1897
1898int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1899{
1900 struct oob_data *data;
1901
1902 data = hci_find_remote_oob_data(hdev, bdaddr);
1903 if (!data)
1904 return -ENOENT;
1905
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001906 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001907
1908 list_del(&data->list);
1909 kfree(data);
1910
1911 return 0;
1912}
1913
1914int hci_remote_oob_data_clear(struct hci_dev *hdev)
1915{
1916 struct oob_data *data, *n;
1917
1918 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1919 list_del(&data->list);
1920 kfree(data);
1921 }
1922
1923 return 0;
1924}
1925
1926int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001927 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001928{
1929 struct oob_data *data;
1930
1931 data = hci_find_remote_oob_data(hdev, bdaddr);
1932
1933 if (!data) {
1934 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1935 if (!data)
1936 return -ENOMEM;
1937
1938 bacpy(&data->bdaddr, bdaddr);
1939 list_add(&data->list, &hdev->remote_oob_data);
1940 }
1941
1942 memcpy(data->hash, hash, sizeof(data->hash));
1943 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1944
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001945 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001946
1947 return 0;
1948}
1949
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001950struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001951{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001952 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001953
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001954 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001955 if (bacmp(bdaddr, &b->bdaddr) == 0)
1956 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001957
1958 return NULL;
1959}
1960
1961int hci_blacklist_clear(struct hci_dev *hdev)
1962{
1963 struct list_head *p, *n;
1964
1965 list_for_each_safe(p, n, &hdev->blacklist) {
1966 struct bdaddr_list *b;
1967
1968 b = list_entry(p, struct bdaddr_list, list);
1969
1970 list_del(p);
1971 kfree(b);
1972 }
1973
1974 return 0;
1975}
1976
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001977int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001978{
1979 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001980
1981 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1982 return -EBADF;
1983
Antti Julku5e762442011-08-25 16:48:02 +03001984 if (hci_blacklist_lookup(hdev, bdaddr))
1985 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001986
1987 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001988 if (!entry)
1989 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001990
1991 bacpy(&entry->bdaddr, bdaddr);
1992
1993 list_add(&entry->list, &hdev->blacklist);
1994
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001995 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001996}
1997
/* Remove @bdaddr from the reject list and notify mgmt; the wildcard
 * address BDADDR_ANY clears the whole list instead.
 *
 * Returns -ENOENT when the address is not listed, otherwise the result
 * of hci_blacklist_clear() or of the mgmt notification.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
2014
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002015static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002016{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002017 if (status) {
2018 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002019
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002020 hci_dev_lock(hdev);
2021 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2022 hci_dev_unlock(hdev);
2023 return;
2024 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002025}
2026
/* Request-complete callback for the LE scan disable request sent from
 * le_scan_disable_work().  LE-only discovery is simply marked stopped;
 * interleaved discovery continues with the BR/EDR inquiry phase.
 */
static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		/* Drop stale results before the inquiry phase starts */
		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}
2069
/* Delayed worker that stops an ongoing LE scan by issuing an
 * LE Set Scan Enable (disable) request; the result is handled in
 * le_scan_disable_work_complete().
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
2090
/* Alloc HCI device: allocate and initialise a struct hci_dev with
 * default parameters, work items, queues and sysfs/discovery state.
 * Returns the new device or NULL on allocation failure; the caller
 * registers it with hci_register_dev() and frees it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Defaults used until the controller reports its capabilities */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* Workers for RX/TX/command processing and power management */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Command timeout watchdog, see hci_cmd_timeout() */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2144
/* Free HCI device: drops the embedded device reference; the hci_dev
 * memory itself is released by the device release callback ("will free
 * via device release").
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2152
/* Register HCI device: assign an index, create workqueues and sysfs
 * entries, hook up rfkill, add the device to the global list and queue
 * the initial power-on.  Returns the new index or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Single-threaded per-device workqueues: one for RX/TX/cmd
	 * processing, one for blocking request work */
	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failure is not fatal; the device runs without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	/* Finish setup asynchronously, see hci_power_on() */
	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2237
/* Unregister HCI device: take it off the global list, close it, tear
 * down mgmt/rfkill/sysfs/workqueues, free all stored remote-device
 * state and finally release the device reference and its index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Let concurrent paths see that unregistration is in progress */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index; hdev may be gone after hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames from the driver */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Free all remembered remote-device state */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Use the id saved earlier: hdev must not be touched any more */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2296
/* Suspend HCI device: only notifies registered listeners; no controller
 * state is changed here.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2304
2305/* Resume HCI device */
2306int hci_resume_dev(struct hci_dev *hdev)
2307{
2308 hci_notify(hdev, HCI_DEV_RESUME);
2309 return 0;
2310}
2311EXPORT_SYMBOL(hci_resume_dev);
2312
Marcel Holtmann76bca882009-11-18 00:40:39 +01002313/* Receive frame from HCI drivers */
2314int hci_recv_frame(struct sk_buff *skb)
2315{
2316 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2317 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002318 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002319 kfree_skb(skb);
2320 return -ENXIO;
2321 }
2322
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002323 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002324 bt_cb(skb)->incoming = 1;
2325
2326 /* Time stamp */
2327 __net_timestamp(skb);
2328
Marcel Holtmann76bca882009-11-18 00:40:39 +01002329 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002330 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002331
Marcel Holtmann76bca882009-11-18 00:40:39 +01002332 return 0;
2333}
2334EXPORT_SYMBOL(hci_recv_frame);
2335
/* Incrementally reassemble one HCI packet of the given type from a raw
 * byte stream.
 *
 * @hdev:  device owning the per-slot reassembly buffers
 * @type:  HCI packet type (HCI_ACLDATA_PKT..HCI_EVENT_PKT)
 * @data:  input bytes from the driver
 * @count: number of input bytes available
 * @index: which hdev->reassembly[] slot to use
 *
 * Returns the number of unconsumed bytes (>= 0) or a negative error.
 * When a full packet has been assembled it is injected into the stack
 * via hci_recv_frame() and the slot is cleared.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Reject unknown packet types and out-of-range slot indices */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate a buffer sized for the
		 * largest packet of this type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed to
		 * complete the current stage (header, then payload).
		 */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length from
		 * it and verify the buffer can hold the full payload.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: hand it to the stack and free the
			 * slot; leftover input bytes are reported back.
			 */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2444
Marcel Holtmannef222012007-07-11 06:42:04 +02002445int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2446{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302447 int rem = 0;
2448
Marcel Holtmannef222012007-07-11 06:42:04 +02002449 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2450 return -EILSEQ;
2451
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002452 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002453 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302454 if (rem < 0)
2455 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002456
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302457 data += (count - rem);
2458 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002459 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002460
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302461 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002462}
2463EXPORT_SYMBOL(hci_recv_fragment);
2464
/* Dedicated reassembly slot for self-framing byte streams (e.g. UART
 * transports) where each packet is prefixed by its type byte.
 */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream in which every packet
 * starts with a one-byte packet-type indicator.
 *
 * Returns the number of unconsumed bytes (>= 0) or a negative error
 * from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: consume the type byte that
			 * prefixes every packet on the stream.
			 */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continue the packet already in progress */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2499
Linus Torvalds1da177e2005-04-16 15:20:36 -07002500/* ---- Interface to upper protocols ---- */
2501
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502int hci_register_cb(struct hci_cb *cb)
2503{
2504 BT_DBG("%p name %s", cb, cb->name);
2505
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002506 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002507 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002508 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002509
2510 return 0;
2511}
2512EXPORT_SYMBOL(hci_register_cb);
2513
2514int hci_unregister_cb(struct hci_cb *cb)
2515{
2516 BT_DBG("%p name %s", cb, cb->name);
2517
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002518 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002519 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002520 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521
2522 return 0;
2523}
2524EXPORT_SYMBOL(hci_unregister_cb);
2525
2526static int hci_send_frame(struct sk_buff *skb)
2527{
2528 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2529
2530 if (!hdev) {
2531 kfree_skb(skb);
2532 return -ENODEV;
2533 }
2534
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002535 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002537 /* Time stamp */
2538 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002540 /* Send copy to monitor */
2541 hci_send_to_monitor(hdev, skb);
2542
2543 if (atomic_read(&hdev->promisc)) {
2544 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002545 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002546 }
2547
2548 /* Get rid of skb owner, prior to sending to the driver. */
2549 skb_orphan(skb);
2550
2551 return hdev->send(skb);
2552}
2553
Johan Hedberg3119ae92013-03-05 20:37:44 +02002554void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2555{
2556 skb_queue_head_init(&req->cmd_q);
2557 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002558 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002559}
2560
2561int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2562{
2563 struct hci_dev *hdev = req->hdev;
2564 struct sk_buff *skb;
2565 unsigned long flags;
2566
2567 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2568
Andre Guedes5d73e032013-03-08 11:20:16 -03002569 /* If an error occured during request building, remove all HCI
2570 * commands queued on the HCI request queue.
2571 */
2572 if (req->err) {
2573 skb_queue_purge(&req->cmd_q);
2574 return req->err;
2575 }
2576
Johan Hedberg3119ae92013-03-05 20:37:44 +02002577 /* Do not allow empty requests */
2578 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002579 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002580
2581 skb = skb_peek_tail(&req->cmd_q);
2582 bt_cb(skb)->req.complete = complete;
2583
2584 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2585 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2586 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2587
2588 queue_work(hdev->workqueue, &hdev->cmd_work);
2589
2590 return 0;
2591}
2592
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002593static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002594 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002595{
2596 int len = HCI_COMMAND_HDR_SIZE + plen;
2597 struct hci_command_hdr *hdr;
2598 struct sk_buff *skb;
2599
Linus Torvalds1da177e2005-04-16 15:20:36 -07002600 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002601 if (!skb)
2602 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002603
2604 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002605 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606 hdr->plen = plen;
2607
2608 if (plen)
2609 memcpy(skb_put(skb, plen), param, plen);
2610
2611 BT_DBG("skb len %d", skb->len);
2612
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002613 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002615
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002616 return skb;
2617}
2618
2619/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002620int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2621 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002622{
2623 struct sk_buff *skb;
2624
2625 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2626
2627 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2628 if (!skb) {
2629 BT_ERR("%s no memory for command", hdev->name);
2630 return -ENOMEM;
2631 }
2632
Johan Hedberg11714b32013-03-05 20:37:47 +02002633 /* Stand-alone HCI commands must be flaged as
2634 * single-command requests.
2635 */
2636 bt_cb(skb)->req.start = true;
2637
Linus Torvalds1da177e2005-04-16 15:20:36 -07002638 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002639 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640
2641 return 0;
2642}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002643
Johan Hedberg71c76a12013-03-05 20:37:46 +02002644/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002645void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2646 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002647{
2648 struct hci_dev *hdev = req->hdev;
2649 struct sk_buff *skb;
2650
2651 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2652
Andre Guedes34739c12013-03-08 11:20:18 -03002653 /* If an error occured during request building, there is no point in
2654 * queueing the HCI command. We can simply return.
2655 */
2656 if (req->err)
2657 return;
2658
Johan Hedberg71c76a12013-03-05 20:37:46 +02002659 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2660 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002661 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2662 hdev->name, opcode);
2663 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002664 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002665 }
2666
2667 if (skb_queue_empty(&req->cmd_q))
2668 bt_cb(skb)->req.start = true;
2669
Johan Hedberg02350a72013-04-03 21:50:29 +03002670 bt_cb(skb)->req.event = event;
2671
Johan Hedberg71c76a12013-03-05 20:37:46 +02002672 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002673}
2674
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002675void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2676 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002677{
2678 hci_req_add_ev(req, opcode, plen, param, 0);
2679}
2680
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002682void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683{
2684 struct hci_command_hdr *hdr;
2685
2686 if (!hdev->sent_cmd)
2687 return NULL;
2688
2689 hdr = (void *) hdev->sent_cmd->data;
2690
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002691 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 return NULL;
2693
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002694 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695
2696 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2697}
2698
2699/* Send ACL data */
2700static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2701{
2702 struct hci_acl_hdr *hdr;
2703 int len = skb->len;
2704
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002705 skb_push(skb, HCI_ACL_HDR_SIZE);
2706 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002707 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002708 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2709 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710}
2711
/* Prepend ACL headers to an (optionally fragmented) skb and append the
 * whole packet to the given transmit queue.
 *
 * For a fragmented skb the frag_list is detached and every fragment is
 * queued as its own skb; all fragments are queued atomically under the
 * queue lock so the scheduler never sees a partial packet.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* The head skb carries only its linear data; fragments follow
	 * via the frag_list handled below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	/* BR/EDR uses the connection handle; AMP uses the logical-link
	 * (channel) handle in the ACL header.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, never ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2770
2771void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2772{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002773 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002774
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002775 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002776
2777 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002778
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002779 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002781 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783
2784/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002785void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786{
2787 struct hci_dev *hdev = conn->hdev;
2788 struct hci_sco_hdr hdr;
2789
2790 BT_DBG("%s len %d", hdev->name, skb->len);
2791
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002792 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 hdr.dlen = skb->len;
2794
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002795 skb_push(skb, HCI_SCO_HDR_SIZE);
2796 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002797 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798
2799 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002800 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002801
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002803 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805
2806/* ---- HCI TX task (outgoing data) ---- */
2807
/* HCI Connection scheduler */

/* Pick the connection of the given link type with pending data and the
 * fewest outstanding (sent) packets, and compute its fair share of the
 * controller's remaining buffer credits.
 *
 * *quote is set to the per-connection packet budget (at least 1 when a
 * connection is chosen, 0 otherwise). Returns the chosen connection or
 * NULL if nothing of this type has queued data.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Only consider matching connections with queued data */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest in-flight packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Select the credit pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of credits; always grant at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2868
/* Transmit-timeout handler: disconnect every connection of the given
 * link type that still has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2889
/* Channel-aware scheduler: among all channels of connections of the
 * given link type, pick the one whose head skb has the highest priority
 * and whose connection has the fewest outstanding packets.
 *
 * *quote receives the packet budget for the chosen channel (at least 1).
 * Returns NULL when no channel of this type has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Lower-priority channels are skipped entirely */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal-priority channels, prefer the
			 * connection with the fewest in-flight packets.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once every connection of this type was visited */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Select the credit pool that matches the chosen link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of credits; always grant at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2971
/* Anti-starvation pass run after a scheduling round: channels that got
 * no service in the round (chan->sent == 0) have their head skb promoted
 * to HCI_PRIO_MAX - 1; channels that were served get their round counter
 * reset instead.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was served this round: just reset its
			 * counter and leave priorities untouched.
			 */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop once every connection of this type was visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3021
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003022static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3023{
3024 /* Calculate count of blocks used by this packet */
3025 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3026}
3027
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003028static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003029{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003030 if (!test_bit(HCI_RAW, &hdev->flags)) {
3031 /* ACL tx timeout must be longer than maximum
3032 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003033 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003034 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003035 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003036 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003037}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003038
/* Packet-based ACL scheduler: drain per-channel queues while ACL buffer
 * credits remain, honoring per-channel quotes and skb priorities, then
 * run the starvation pass if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit consumed; account on channel and conn */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted: rebalance starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3076
/* Block-based ACL scheduler (data-block flow control): like the packet
 * scheduler, but credits are counted in controller data blocks and a
 * frame larger than the remaining block budget ends the round.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers schedule AMP links; others schedule ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Frame would exceed remaining block budget; stop
			 * scheduling entirely until credits return.
			 */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Account consumed blocks against budget and quota */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was transmitted: rebalance starved channels */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3130
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003131static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003132{
3133 BT_DBG("%s", hdev->name);
3134
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003135 /* No ACL link over BR/EDR controller */
3136 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3137 return;
3138
3139 /* No AMP link over AMP controller */
3140 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003141 return;
3142
3143 switch (hdev->flow_ctl_mode) {
3144 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3145 hci_sched_acl_pkt(hdev);
3146 break;
3147
3148 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3149 hci_sched_acl_blk(hdev);
3150 break;
3151 }
3152}
3153
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154/* Schedule SCO */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003155static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156{
3157 struct hci_conn *conn;
3158 struct sk_buff *skb;
3159 int quote;
3160
3161 BT_DBG("%s", hdev->name);
3162
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003163 if (!hci_conn_num(hdev, SCO_LINK))
3164 return;
3165
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3167 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3168 BT_DBG("skb %p len %d", skb, skb->len);
3169 hci_send_frame(skb);
3170
3171 conn->sent++;
3172 if (conn->sent == ~0)
3173 conn->sent = 0;
3174 }
3175 }
3176}
3177
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003178static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003179{
3180 struct hci_conn *conn;
3181 struct sk_buff *skb;
3182 int quote;
3183
3184 BT_DBG("%s", hdev->name);
3185
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003186 if (!hci_conn_num(hdev, ESCO_LINK))
3187 return;
3188
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003189 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3190 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003191 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3192 BT_DBG("skb %p len %d", skb, skb->len);
3193 hci_send_frame(skb);
3194
3195 conn->sent++;
3196 if (conn->sent == ~0)
3197 conn->sent = 0;
3198 }
3199 }
3200}
3201
/* Schedule LE traffic. Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL credit pool, which is why the function
 * falls back to acl_cnt below.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE credit pool if the controller has one,
	 * otherwise draw from the shared ACL pool.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	/* Remember the starting credit count so we can detect below
	 * whether anything was actually transmitted.
	 */
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Priority still matches: now actually remove the
			 * skb that was only peeked at above.
			 */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If we sent anything, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3252
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003253static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003254{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003255 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003256 struct sk_buff *skb;
3257
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003258 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003259 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003260
3261 /* Schedule queues and send stuff to HCI driver */
3262
3263 hci_sched_acl(hdev);
3264
3265 hci_sched_sco(hdev);
3266
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003267 hci_sched_esco(hdev);
3268
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003269 hci_sched_le(hdev);
3270
Linus Torvalds1da177e2005-04-16 15:20:36 -07003271 /* Send next queued raw (unknown type) packet */
3272 while ((skb = skb_dequeue(&hdev->raw_q)))
3273 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274}
3275
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003276/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003277
3278/* ACL data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003279static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280{
3281 struct hci_acl_hdr *hdr = (void *) skb->data;
3282 struct hci_conn *conn;
3283 __u16 handle, flags;
3284
3285 skb_pull(skb, HCI_ACL_HDR_SIZE);
3286
3287 handle = __le16_to_cpu(hdr->handle);
3288 flags = hci_flags(handle);
3289 handle = hci_handle(handle);
3290
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003291 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003292 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003293
3294 hdev->stat.acl_rx++;
3295
3296 hci_dev_lock(hdev);
3297 conn = hci_conn_hash_lookup_handle(hdev, handle);
3298 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003299
Linus Torvalds1da177e2005-04-16 15:20:36 -07003300 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003301 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003302
Linus Torvalds1da177e2005-04-16 15:20:36 -07003303 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003304 l2cap_recv_acldata(conn, skb, flags);
3305 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003306 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003307 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003308 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003309 }
3310
3311 kfree_skb(skb);
3312}
3313
3314/* SCO data packet */
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003315static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003316{
3317 struct hci_sco_hdr *hdr = (void *) skb->data;
3318 struct hci_conn *conn;
3319 __u16 handle;
3320
3321 skb_pull(skb, HCI_SCO_HDR_SIZE);
3322
3323 handle = __le16_to_cpu(hdr->handle);
3324
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003325 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003326
3327 hdev->stat.sco_rx++;
3328
3329 hci_dev_lock(hdev);
3330 conn = hci_conn_hash_lookup_handle(hdev, handle);
3331 hci_dev_unlock(hdev);
3332
3333 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003334 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003335 sco_recv_scodata(conn, skb);
3336 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003338 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003339 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003340 }
3341
3342 kfree_skb(skb);
3343}
3344
Johan Hedberg9238f362013-03-05 20:37:48 +02003345static bool hci_req_is_complete(struct hci_dev *hdev)
3346{
3347 struct sk_buff *skb;
3348
3349 skb = skb_peek(&hdev->cmd_q);
3350 if (!skb)
3351 return true;
3352
3353 return bt_cb(skb)->req.start;
3354}
3355
Johan Hedberg42c6b122013-03-05 20:37:49 +02003356static void hci_resend_last(struct hci_dev *hdev)
3357{
3358 struct hci_command_hdr *sent;
3359 struct sk_buff *skb;
3360 u16 opcode;
3361
3362 if (!hdev->sent_cmd)
3363 return;
3364
3365 sent = (void *) hdev->sent_cmd->data;
3366 opcode = __le16_to_cpu(sent->opcode);
3367 if (opcode == HCI_OP_RESET)
3368 return;
3369
3370 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3371 if (!skb)
3372 return;
3373
3374 skb_queue_head(&hdev->cmd_q, skb);
3375 queue_work(hdev->workqueue, &hdev->cmd_work);
3376}
3377
/* Called when a command identified by @opcode has completed with @status.
 * Decides whether the request the command belongs to is now finished and,
 * if so, invokes the request's completion callback exactly once.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request.
	 * The queue lock protects cmd_q against the cmd worker; flushing
	 * stops at the first skb marked as the start of the next request.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Keep the last seen callback; it belongs to this request */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3443
/* RX worker: drains hdev->rx_q, mirrors traffic to the monitor and (in
 * promiscuous mode) to raw sockets, then dispatches each frame to the
 * handler for its packet type. Each skb is consumed exactly once, either
 * by a handler or by kfree_skb() here.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode user space owns the device; drop here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states;
			 * events are still needed to drive init.
			 */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}
}
3498
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003499static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003500{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003501 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003502 struct sk_buff *skb;
3503
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003504 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3505 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003506
Linus Torvalds1da177e2005-04-16 15:20:36 -07003507 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003508 if (atomic_read(&hdev->cmd_cnt)) {
3509 skb = skb_dequeue(&hdev->cmd_q);
3510 if (!skb)
3511 return;
3512
Wei Yongjun7585b972009-02-25 18:29:52 +08003513 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003514
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003515 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3516 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003517 atomic_dec(&hdev->cmd_cnt);
3518 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003519 if (test_bit(HCI_RESET, &hdev->flags))
3520 del_timer(&hdev->cmd_timer);
3521 else
3522 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003523 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003524 } else {
3525 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003526 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003527 }
3528 }
3529}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003530
Andre Guedes31f79562012-04-24 21:02:53 -03003531u8 bdaddr_to_le(u8 bdaddr_type)
3532{
3533 switch (bdaddr_type) {
3534 case BDADDR_LE_PUBLIC:
3535 return ADDR_LE_DEV_PUBLIC;
3536
3537 default:
3538 /* Fallback to LE Random address type */
3539 return ADDR_LE_DEV_RANDOM;
3540 }
3541}