/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

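/* Take the pending event out of hdev->recv_evt and, if it matches the
 * requested event (or, when no event is given, a Command Complete for the
 * given opcode), hand it to the caller. On any mismatch the skb is freed
 * and -ENODATA is returned.
 */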
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

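/* Send a single HCI command and wait for the controller's reply.
 * @event is the event code to wait for; 0 means wait for the Command
 * Complete event of @opcode. Returns the reply skb (owned by the caller,
 * free with kfree_skb()) or an ERR_PTR on failure or timeout.
 */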
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

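/* Illustrative use only (not taken from this file): a driver could read
 * the controller's version information synchronously like this, assuming
 * it serializes requests and HCI_INIT_TIMEOUT is an acceptable wait:
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *      rp = (struct hci_rp_read_local_version *) skb->data;
 *      ...
 *      kfree_skb(skb);
 */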
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

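/* Like __hci_req_sync() but usable only while the device is up: it
 * refuses to run when HCI_UP is not set and takes the request lock so
 * that all synchronous requests are serialized.
 */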
static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

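/* AMP controllers use block-based flow control, in contrast to the
 * packet-based mode set up for BR/EDR in bredr_init() above.
 */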
static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

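/* Pick the inquiry mode the controller should report results in:
 * 0x02 = with extended inquiry result, 0x01 = with RSSI, 0x00 =
 * standard. The manufacturer/revision checks below appear to cover
 * controllers that support RSSI reporting without advertising it in
 * their feature bits.
 */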
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only send the command if it is
         * marked as supported. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes
         * this command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

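/* Run the three init stages in order: stage 1 resets the controller and
 * reads basic information, stages 2 and 3 configure BR/EDR and LE
 * features. AMP controllers stop after stage 1.
 */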
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
         * BR/EDR/LE type controllers. AMP controllers only need the
         * first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

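/* The inquiry cache keeps every discovered device on the "all" list, and
 * additionally tracks devices on the "unknown" list (name not yet known)
 * and the "resolve" list (name resolution pending); see
 * hci_inquiry_cache_update() below.
 */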
void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

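/* Add or refresh a cache entry for an inquiry result. Returns true if the
 * device name is known (no name resolution needed), false if the entry
 * could not be allocated or its name is still unknown.
 */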
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

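/* HCIINQUIRY ioctl handler: run an inquiry if the cache is stale (or a
 * flush was requested), then copy up to ir.num_rsp cached results back to
 * user space.
 */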
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* for unlimited number of responses we will use buffer with
         * 255 entries
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

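/* Build the LE advertising data: a sequence of (length, type, value)
 * structures as defined for EIR/AD, here at most the Flags, TX Power and
 * Local Name fields. Returns the number of bytes written.
 */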
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

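/* Power on the device: run the vendor setup stage if needed, then the
 * __hci_init() sequence (unless the device is marked raw), and notify the
 * mgmt layer. Fails with -ERFKILL while the device is rfkilled outside of
 * the setup stage.
 */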
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        /* Check for rfkill but allow the HCI setup stage to proceed
         * (which in itself doesn't cause any RF activity).
         */
        if (test_bit(HCI_RFKILLED, &hdev->dev_flags) &&
            !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                /* Treat all non BR/EDR controllers as raw devices if
                 * enable_hs is not set.
                 */
                if (hdev->dev_type != HCI_BREDR && !enable_hs)
                        set_bit(HCI_RAW, &hdev->flags);

                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        hci_inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

1394int hci_dev_cmd(unsigned int cmd, void __user *arg)
1395{
1396 struct hci_dev *hdev;
1397 struct hci_dev_req dr;
1398 int err = 0;
1399
1400 if (copy_from_user(&dr, arg, sizeof(dr)))
1401 return -EFAULT;
1402
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001403 hdev = hci_dev_get(dr.dev_id);
1404 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 return -ENODEV;
1406
1407 switch (cmd) {
1408 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001409 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1410 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001411 break;
1412
1413 case HCISETENCRYPT:
1414 if (!lmp_encrypt_capable(hdev)) {
1415 err = -EOPNOTSUPP;
1416 break;
1417 }
1418
1419 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1420 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001421 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1422 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 if (err)
1424 break;
1425 }
1426
Johan Hedberg01178cd2013-03-05 20:37:41 +02001427 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1428 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 break;
1430
1431 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001432 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1433 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 break;
1435
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001436 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001437 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1438 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001439 break;
1440
1441 case HCISETLINKMODE:
1442 hdev->link_mode = ((__u16) dr.dev_opt) &
1443 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1444 break;
1445
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 case HCISETPTYPE:
1447 hdev->pkt_type = (__u16) dr.dev_opt;
1448 break;
1449
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001451 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1452 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 break;
1454
1455 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001456 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1457 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 break;
1459
1460 default:
1461 err = -EINVAL;
1462 break;
1463 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001464
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465 hci_dev_put(hdev);
1466 return err;
1467}
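/* Illustrative sketch (editorial addition, not part of the original file):
 * how a hypothetical userspace caller might pack dev_opt for the
 * HCISETACLMTU case above. The kernel side reads the packet count from the
 * first __u16 half of dev_opt and the MTU from the second, so on a
 * little-endian machine the caller packs the MTU into the high 16 bits.
 * The fd variable and the chosen values are assumptions for the example.
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = ((__u32) acl_mtu << 16) | acl_pkts;
 *	if (ioctl(fd, HCISETACLMTU, &dr) < 0)
 *		perror("HCISETACLMTU");
 */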
1468
1469int hci_get_dev_list(void __user *arg)
1470{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001471 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001472 struct hci_dev_list_req *dl;
1473 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001474 int n = 0, size, err;
1475 __u16 dev_num;
1476
1477 if (get_user(dev_num, (__u16 __user *) arg))
1478 return -EFAULT;
1479
1480 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1481 return -EINVAL;
1482
1483 size = sizeof(*dl) + dev_num * sizeof(*dr);
1484
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001485 dl = kzalloc(size, GFP_KERNEL);
1486 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001487 return -ENOMEM;
1488
1489 dr = dl->dev_req;
1490
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001491 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001492 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001493 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001494 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001495
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001496 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1497 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001498
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 (dr + n)->dev_id = hdev->id;
1500 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001501
Linus Torvalds1da177e2005-04-16 15:20:36 -07001502 if (++n >= dev_num)
1503 break;
1504 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001505 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506
1507 dl->dev_num = n;
1508 size = sizeof(*dl) + n * sizeof(*dr);
1509
1510 err = copy_to_user(arg, dl, size);
1511 kfree(dl);
1512
1513 return err ? -EFAULT : 0;
1514}
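/* Illustrative sketch (editorial addition): a hypothetical userspace caller
 * of the HCIGETDEVLIST ioctl served by hci_get_dev_list() above. The buffer
 * begins with the requested __u16 dev_num, which get_user() reads before
 * the populated structure is copied back. The fd variable and the count of
 * 16 are assumptions for the example.
 *
 *	struct hci_dev_list_req *dl;
 *	int i, n = 16;
 *
 *	dl = calloc(1, sizeof(*dl) + n * sizeof(struct hci_dev_req));
 *	dl->dev_num = n;
 *	if (ioctl(fd, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%d flags 0x%x\n", dl->dev_req[i].dev_id,
 *			       dl->dev_req[i].dev_opt);
 *	free(dl);
 */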
1515
1516int hci_get_dev_info(void __user *arg)
1517{
1518 struct hci_dev *hdev;
1519 struct hci_dev_info di;
1520 int err = 0;
1521
1522 if (copy_from_user(&di, arg, sizeof(di)))
1523 return -EFAULT;
1524
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001525 hdev = hci_dev_get(di.dev_id);
1526 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001527 return -ENODEV;
1528
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001529 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001530 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001531
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001532 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1533 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001534
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 strcpy(di.name, hdev->name);
1536 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001537 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001538 di.flags = hdev->flags;
1539 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001540 if (lmp_bredr_capable(hdev)) {
1541 di.acl_mtu = hdev->acl_mtu;
1542 di.acl_pkts = hdev->acl_pkts;
1543 di.sco_mtu = hdev->sco_mtu;
1544 di.sco_pkts = hdev->sco_pkts;
1545 } else {
1546 di.acl_mtu = hdev->le_mtu;
1547 di.acl_pkts = hdev->le_pkts;
1548 di.sco_mtu = 0;
1549 di.sco_pkts = 0;
1550 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 di.link_policy = hdev->link_policy;
1552 di.link_mode = hdev->link_mode;
1553
1554 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1555 memcpy(&di.features, &hdev->features, sizeof(di.features));
1556
1557 if (copy_to_user(arg, &di, sizeof(di)))
1558 err = -EFAULT;
1559
1560 hci_dev_put(hdev);
1561
1562 return err;
1563}
1564
1565/* ---- Interface to HCI drivers ---- */
1566
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001567static int hci_rfkill_set_block(void *data, bool blocked)
1568{
1569 struct hci_dev *hdev = data;
1570
1571 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1572
Johan Hedberg5e130362013-09-13 08:58:17 +03001573 if (blocked) {
1574 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001575 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1576 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001577 } else {
1578 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
1579 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001580
1581 return 0;
1582}
1583
1584static const struct rfkill_ops hci_rfkill_ops = {
1585 .set_block = hci_rfkill_set_block,
1586};
1587
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001588static void hci_power_on(struct work_struct *work)
1589{
1590 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001591 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001592
1593 BT_DBG("%s", hdev->name);
1594
Johan Hedberg96570ff2013-05-29 09:51:29 +03001595 err = hci_dev_open(hdev->id);
1596 if (err < 0) {
1597 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001598 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001599 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001600
Johan Hedbergbf543032013-09-13 08:58:18 +03001601 if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
1602 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1603 hci_dev_do_close(hdev);
1604 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02001605 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1606 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03001607 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001608
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001609 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001610 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001611}
1612
1613static void hci_power_off(struct work_struct *work)
1614{
Johan Hedberg32435532011-11-07 22:16:04 +02001615 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001616 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001617
1618 BT_DBG("%s", hdev->name);
1619
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001620 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001621}
1622
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001623static void hci_discov_off(struct work_struct *work)
1624{
1625 struct hci_dev *hdev;
1626 u8 scan = SCAN_PAGE;
1627
1628 hdev = container_of(work, struct hci_dev, discov_off.work);
1629
1630 BT_DBG("%s", hdev->name);
1631
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001632 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001633
1634 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1635
1636 hdev->discov_timeout = 0;
1637
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001638 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001639}
1640
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001641int hci_uuids_clear(struct hci_dev *hdev)
1642{
Johan Hedberg48210022013-01-27 00:31:28 +02001643 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001644
Johan Hedberg48210022013-01-27 00:31:28 +02001645 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1646 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001647 kfree(uuid);
1648 }
1649
1650 return 0;
1651}
1652
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001653int hci_link_keys_clear(struct hci_dev *hdev)
1654{
1655 struct list_head *p, *n;
1656
1657 list_for_each_safe(p, n, &hdev->link_keys) {
1658 struct link_key *key;
1659
1660 key = list_entry(p, struct link_key, list);
1661
1662 list_del(p);
1663 kfree(key);
1664 }
1665
1666 return 0;
1667}
1668
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001669int hci_smp_ltks_clear(struct hci_dev *hdev)
1670{
1671 struct smp_ltk *k, *tmp;
1672
1673 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1674 list_del(&k->list);
1675 kfree(k);
1676 }
1677
1678 return 0;
1679}
1680
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001681struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1682{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001683 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001684
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001685 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001686 if (bacmp(bdaddr, &k->bdaddr) == 0)
1687 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001688
1689 return NULL;
1690}
1691
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301692static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001693 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001694{
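	/* Editorial note: auth_type and remote_auth below carry the SSP
	 * authentication-requirements encoding (0x00/0x01 no bonding,
	 * 0x02/0x03 dedicated bonding, 0x04/0x05 general bonding; odd
	 * values request MITM protection), which is what the numeric
	 * range checks in this function test against. */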
1695 /* Legacy key */
1696 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301697 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001698
1699 /* Debug keys are insecure so don't store them persistently */
1700 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301701 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001702
1703 /* Changed combination key and there's no previous one */
1704 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301705 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001706
1707 /* Security mode 3 case */
1708 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301709 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001710
1711 /* Neither local nor remote side had no-bonding as requirement */
1712 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301713 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001714
1715 /* Local side had dedicated bonding as requirement */
1716 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301717 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001718
1719 /* Remote side had dedicated bonding as requirement */
1720 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301721 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001722
1723 /* If none of the above criteria match, then don't store the key
1724 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301725 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001726}
1727
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001728struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001729{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001730 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001731
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001732 list_for_each_entry(k, &hdev->long_term_keys, list) {
1733 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001734 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001735 continue;
1736
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001737 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001738 }
1739
1740 return NULL;
1741}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001742
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001743struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001744 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001745{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001746 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001747
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001748 list_for_each_entry(k, &hdev->long_term_keys, list)
1749 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001750 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001751 return k;
1752
1753 return NULL;
1754}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001755
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001756int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001757 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001758{
1759 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301760 u8 old_key_type;
1761 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001762
1763 old_key = hci_find_link_key(hdev, bdaddr);
1764 if (old_key) {
1765 old_key_type = old_key->type;
1766 key = old_key;
1767 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001768 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001769 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1770 if (!key)
1771 return -ENOMEM;
1772 list_add(&key->list, &hdev->link_keys);
1773 }
1774
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001775 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001776
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001777 /* Some buggy controller combinations generate a changed
1778 * combination key for legacy pairing even when there's no
1779 * previous key */
1780 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001781 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001782 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001783 if (conn)
1784 conn->key_type = type;
1785 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001786
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001787 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001788 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001789 key->pin_len = pin_len;
1790
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001791 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001792 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001793 else
1794 key->type = type;
1795
Johan Hedberg4df378a2011-04-28 11:29:03 -07001796 if (!new_key)
1797 return 0;
1798
1799 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1800
Johan Hedberg744cf192011-11-08 20:40:14 +02001801 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001802
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301803 if (conn)
1804 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001805
1806 return 0;
1807}
1808
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001809int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001810 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001811 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001812{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001813 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001814
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001815 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1816 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001817
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001818 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1819 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001820 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001821 else {
1822 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001823 if (!key)
1824 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001825 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001826 }
1827
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001828 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001829 key->bdaddr_type = addr_type;
1830 memcpy(key->val, tk, sizeof(key->val));
1831 key->authenticated = authenticated;
1832 key->ediv = ediv;
1833 key->enc_size = enc_size;
1834 key->type = type;
1835 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001836
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001837 if (!new_key)
1838 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001839
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001840 if (type & HCI_SMP_LTK)
1841 mgmt_new_ltk(hdev, key, 1);
1842
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001843 return 0;
1844}
1845
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001846int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1847{
1848 struct link_key *key;
1849
1850 key = hci_find_link_key(hdev, bdaddr);
1851 if (!key)
1852 return -ENOENT;
1853
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001854 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001855
1856 list_del(&key->list);
1857 kfree(key);
1858
1859 return 0;
1860}
1861
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001862int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1863{
1864 struct smp_ltk *k, *tmp;
1865
1866 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1867 if (bacmp(bdaddr, &k->bdaddr))
1868 continue;
1869
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001870 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001871
1872 list_del(&k->list);
1873 kfree(k);
1874 }
1875
1876 return 0;
1877}
1878
Ville Tervo6bd32322011-02-16 16:32:41 +02001879/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001880static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001881{
1882 struct hci_dev *hdev = (void *) arg;
1883
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001884 if (hdev->sent_cmd) {
1885 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1886 u16 opcode = __le16_to_cpu(sent->opcode);
1887
1888 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1889 } else {
1890 BT_ERR("%s command tx timeout", hdev->name);
1891 }
1892
Ville Tervo6bd32322011-02-16 16:32:41 +02001893 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001894 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001895}
1896
Szymon Janc2763eda2011-03-22 13:12:22 +01001897struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001898 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001899{
1900 struct oob_data *data;
1901
1902 list_for_each_entry(data, &hdev->remote_oob_data, list)
1903 if (bacmp(bdaddr, &data->bdaddr) == 0)
1904 return data;
1905
1906 return NULL;
1907}
1908
1909int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1910{
1911 struct oob_data *data;
1912
1913 data = hci_find_remote_oob_data(hdev, bdaddr);
1914 if (!data)
1915 return -ENOENT;
1916
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001917 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001918
1919 list_del(&data->list);
1920 kfree(data);
1921
1922 return 0;
1923}
1924
1925int hci_remote_oob_data_clear(struct hci_dev *hdev)
1926{
1927 struct oob_data *data, *n;
1928
1929 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1930 list_del(&data->list);
1931 kfree(data);
1932 }
1933
1934 return 0;
1935}
1936
1937int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001938 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001939{
1940 struct oob_data *data;
1941
1942 data = hci_find_remote_oob_data(hdev, bdaddr);
1943
1944 if (!data) {
1945 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1946 if (!data)
1947 return -ENOMEM;
1948
1949 bacpy(&data->bdaddr, bdaddr);
1950 list_add(&data->list, &hdev->remote_oob_data);
1951 }
1952
1953 memcpy(data->hash, hash, sizeof(data->hash));
1954 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1955
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001956 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001957
1958 return 0;
1959}
1960
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001961struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001962{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001963 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001964
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001965 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001966 if (bacmp(bdaddr, &b->bdaddr) == 0)
1967 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001968
1969 return NULL;
1970}
1971
1972int hci_blacklist_clear(struct hci_dev *hdev)
1973{
1974 struct list_head *p, *n;
1975
1976 list_for_each_safe(p, n, &hdev->blacklist) {
1977 struct bdaddr_list *b;
1978
1979 b = list_entry(p, struct bdaddr_list, list);
1980
1981 list_del(p);
1982 kfree(b);
1983 }
1984
1985 return 0;
1986}
1987
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001988int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001989{
1990 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001991
1992 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1993 return -EBADF;
1994
Antti Julku5e762442011-08-25 16:48:02 +03001995 if (hci_blacklist_lookup(hdev, bdaddr))
1996 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001997
1998 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001999 if (!entry)
2000 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002001
2002 bacpy(&entry->bdaddr, bdaddr);
2003
2004 list_add(&entry->list, &hdev->blacklist);
2005
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002006 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002007}
2008
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002009int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002010{
2011 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002012
Szymon Janc1ec918c2011-11-16 09:32:21 +01002013 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002014 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002015
2016 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002017 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002018 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002019
2020 list_del(&entry->list);
2021 kfree(entry);
2022
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002023 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002024}
2025
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002026static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002027{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002028 if (status) {
2029 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002030
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002031 hci_dev_lock(hdev);
2032 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2033 hci_dev_unlock(hdev);
2034 return;
2035 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002036}
2037
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002038static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002039{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002040 /* General inquiry access code (GIAC) */
2041 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2042 struct hci_request req;
2043 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002044 int err;
2045
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002046 if (status) {
2047 BT_ERR("Failed to disable LE scanning: status %d", status);
2048 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002049 }
2050
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002051 switch (hdev->discovery.type) {
2052 case DISCOV_TYPE_LE:
2053 hci_dev_lock(hdev);
2054 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2055 hci_dev_unlock(hdev);
2056 break;
2057
2058 case DISCOV_TYPE_INTERLEAVED:
2059 hci_req_init(&req, hdev);
2060
2061 memset(&cp, 0, sizeof(cp));
2062 memcpy(&cp.lap, lap, sizeof(cp.lap));
2063 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2064 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2065
2066 hci_dev_lock(hdev);
2067
2068 hci_inquiry_cache_flush(hdev);
2069
2070 err = hci_req_run(&req, inquiry_complete);
2071 if (err) {
2072 BT_ERR("Inquiry request failed: err %d", err);
2073 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2074 }
2075
2076 hci_dev_unlock(hdev);
2077 break;
2078 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002079}
2080
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002081static void le_scan_disable_work(struct work_struct *work)
2082{
2083 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002084 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002085 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002086 struct hci_request req;
2087 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002088
2089 BT_DBG("%s", hdev->name);
2090
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002091 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002092
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002093 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002094 cp.enable = LE_SCAN_DISABLE;
2095 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002096
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002097 err = hci_req_run(&req, le_scan_disable_work_complete);
2098 if (err)
2099 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002100}
2101
David Herrmann9be0dab2012-04-22 14:39:57 +02002102/* Alloc HCI device */
2103struct hci_dev *hci_alloc_dev(void)
2104{
2105 struct hci_dev *hdev;
2106
2107 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2108 if (!hdev)
2109 return NULL;
2110
David Herrmannb1b813d2012-04-22 14:39:58 +02002111 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2112 hdev->esco_type = (ESCO_HV1);
2113 hdev->link_mode = (HCI_LM_ACCEPT);
2114 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002115 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2116 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002117
David Herrmannb1b813d2012-04-22 14:39:58 +02002118 hdev->sniff_max_interval = 800;
2119 hdev->sniff_min_interval = 80;
2120
2121 mutex_init(&hdev->lock);
2122 mutex_init(&hdev->req_lock);
2123
2124 INIT_LIST_HEAD(&hdev->mgmt_pending);
2125 INIT_LIST_HEAD(&hdev->blacklist);
2126 INIT_LIST_HEAD(&hdev->uuids);
2127 INIT_LIST_HEAD(&hdev->link_keys);
2128 INIT_LIST_HEAD(&hdev->long_term_keys);
2129 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002130 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002131
2132 INIT_WORK(&hdev->rx_work, hci_rx_work);
2133 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2134 INIT_WORK(&hdev->tx_work, hci_tx_work);
2135 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002136
David Herrmannb1b813d2012-04-22 14:39:58 +02002137 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2138 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2139 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2140
David Herrmannb1b813d2012-04-22 14:39:58 +02002141 skb_queue_head_init(&hdev->rx_q);
2142 skb_queue_head_init(&hdev->cmd_q);
2143 skb_queue_head_init(&hdev->raw_q);
2144
2145 init_waitqueue_head(&hdev->req_wait_q);
2146
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002147 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002148
David Herrmannb1b813d2012-04-22 14:39:58 +02002149 hci_init_sysfs(hdev);
2150 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002151
2152 return hdev;
2153}
2154EXPORT_SYMBOL(hci_alloc_dev);
2155
2156/* Free HCI device */
2157void hci_free_dev(struct hci_dev *hdev)
2158{
David Herrmann9be0dab2012-04-22 14:39:57 +02002159 /* will free via device release */
2160 put_device(&hdev->dev);
2161}
2162EXPORT_SYMBOL(hci_free_dev);
2163
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164/* Register HCI device */
2165int hci_register_dev(struct hci_dev *hdev)
2166{
David Herrmannb1b813d2012-04-22 14:39:58 +02002167 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002168
David Herrmann010666a2012-01-07 15:47:07 +01002169 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 return -EINVAL;
2171
Mat Martineau08add512011-11-02 16:18:36 -07002172 /* Do not allow HCI_AMP devices to register at index 0,
2173 * so the index can be used as the AMP controller ID.
2174 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002175 switch (hdev->dev_type) {
2176 case HCI_BREDR:
2177 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2178 break;
2179 case HCI_AMP:
2180 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2181 break;
2182 default:
2183 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002185
Sasha Levin3df92b32012-05-27 22:36:56 +02002186 if (id < 0)
2187 return id;
2188
Linus Torvalds1da177e2005-04-16 15:20:36 -07002189 sprintf(hdev->name, "hci%d", id);
2190 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002191
2192 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2193
Kees Cookd8537542013-07-03 15:04:57 -07002194 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2195 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002196 if (!hdev->workqueue) {
2197 error = -ENOMEM;
2198 goto err;
2199 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002200
Kees Cookd8537542013-07-03 15:04:57 -07002201 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2202 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002203 if (!hdev->req_workqueue) {
2204 destroy_workqueue(hdev->workqueue);
2205 error = -ENOMEM;
2206 goto err;
2207 }
2208
David Herrmann33ca9542011-10-08 14:58:49 +02002209 error = hci_add_sysfs(hdev);
2210 if (error < 0)
2211 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002213 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002214 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2215 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002216 if (hdev->rfkill) {
2217 if (rfkill_register(hdev->rfkill) < 0) {
2218 rfkill_destroy(hdev->rfkill);
2219 hdev->rfkill = NULL;
2220 }
2221 }
2222
Johan Hedberg5e130362013-09-13 08:58:17 +03002223 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2224 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2225
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002226 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002227
2228 if (hdev->dev_type != HCI_AMP)
2229 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2230
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002231 write_lock(&hci_dev_list_lock);
2232 list_add(&hdev->list, &hci_dev_list);
2233 write_unlock(&hci_dev_list_lock);
2234
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002236 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237
Johan Hedberg19202572013-01-14 22:33:51 +02002238 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002239
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002241
David Herrmann33ca9542011-10-08 14:58:49 +02002242err_wqueue:
2243 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002244 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002245err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002246 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002247
David Herrmann33ca9542011-10-08 14:58:49 +02002248 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249}
2250EXPORT_SYMBOL(hci_register_dev);
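/* Illustrative sketch (editorial addition, not part of the original file):
 * the minimal shape of a transport driver registering with the core above.
 * The example_* names are hypothetical; the open/close/send hooks match the
 * signatures this file relies on (hci_send_frame() invokes hdev->send()
 * with skb->dev already pointing at the hdev).
 */
static int example_open(struct hci_dev *hdev)
{
	return 0;	/* bring up the transport */
}

static int example_close(struct hci_dev *hdev)
{
	return 0;	/* shut down the transport */
}

static int example_send(struct sk_buff *skb)
{
	kfree_skb(skb);	/* a real driver would hand this to hardware */
	return 0;
}

static int __maybe_unused example_register(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;		/* transport bus type */
	hdev->dev_type = HCI_BREDR;	/* BR/EDR controllers may take id 0 */
	hdev->open = example_open;
	hdev->close = example_close;
	hdev->send = example_send;

	err = hci_register_dev(hdev);	/* returns the new id or a -errno */
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}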
2251
2252/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002253void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254{
Sasha Levin3df92b32012-05-27 22:36:56 +02002255 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002256
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002257 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258
Johan Hovold94324962012-03-15 14:48:41 +01002259 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2260
Sasha Levin3df92b32012-05-27 22:36:56 +02002261 id = hdev->id;
2262
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002263 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002265 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002266
2267 hci_dev_do_close(hdev);
2268
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302269 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002270 kfree_skb(hdev->reassembly[i]);
2271
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002272 cancel_work_sync(&hdev->power_on);
2273
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002274 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002275 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002276 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002277 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002278 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002279 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002280
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002281 /* mgmt_index_removed should take care of emptying the
2282 * pending list */
2283 BUG_ON(!list_empty(&hdev->mgmt_pending));
2284
Linus Torvalds1da177e2005-04-16 15:20:36 -07002285 hci_notify(hdev, HCI_DEV_UNREG);
2286
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002287 if (hdev->rfkill) {
2288 rfkill_unregister(hdev->rfkill);
2289 rfkill_destroy(hdev->rfkill);
2290 }
2291
David Herrmannce242972011-10-08 14:58:48 +02002292 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002293
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002294 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002295 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002296
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002297 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002298 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002299 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002300 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002301 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002302 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002303 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002304
David Herrmanndc946bd2012-01-07 15:47:24 +01002305 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002306
2307 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308}
2309EXPORT_SYMBOL(hci_unregister_dev);
2310
2311/* Suspend HCI device */
2312int hci_suspend_dev(struct hci_dev *hdev)
2313{
2314 hci_notify(hdev, HCI_DEV_SUSPEND);
2315 return 0;
2316}
2317EXPORT_SYMBOL(hci_suspend_dev);
2318
2319/* Resume HCI device */
2320int hci_resume_dev(struct hci_dev *hdev)
2321{
2322 hci_notify(hdev, HCI_DEV_RESUME);
2323 return 0;
2324}
2325EXPORT_SYMBOL(hci_resume_dev);
2326
Marcel Holtmann76bca882009-11-18 00:40:39 +01002327/* Receive frame from HCI drivers */
2328int hci_recv_frame(struct sk_buff *skb)
2329{
2330 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2331 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002332 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002333 kfree_skb(skb);
2334 return -ENXIO;
2335 }
2336
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002337 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002338 bt_cb(skb)->incoming = 1;
2339
2340 /* Time stamp */
2341 __net_timestamp(skb);
2342
Marcel Holtmann76bca882009-11-18 00:40:39 +01002343 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002344 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002345
Marcel Holtmann76bca882009-11-18 00:40:39 +01002346 return 0;
2347}
2348EXPORT_SYMBOL(hci_recv_frame);
2349
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302350static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002351 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302352{
2353 int len = 0;
2354 int hlen = 0;
2355 int remain = count;
2356 struct sk_buff *skb;
2357 struct bt_skb_cb *scb;
2358
2359 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002360 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302361 return -EILSEQ;
2362
2363 skb = hdev->reassembly[index];
2364
2365 if (!skb) {
2366 switch (type) {
2367 case HCI_ACLDATA_PKT:
2368 len = HCI_MAX_FRAME_SIZE;
2369 hlen = HCI_ACL_HDR_SIZE;
2370 break;
2371 case HCI_EVENT_PKT:
2372 len = HCI_MAX_EVENT_SIZE;
2373 hlen = HCI_EVENT_HDR_SIZE;
2374 break;
2375 case HCI_SCODATA_PKT:
2376 len = HCI_MAX_SCO_SIZE;
2377 hlen = HCI_SCO_HDR_SIZE;
2378 break;
2379 }
2380
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002381 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302382 if (!skb)
2383 return -ENOMEM;
2384
2385 scb = (void *) skb->cb;
2386 scb->expect = hlen;
2387 scb->pkt_type = type;
2388
2389 skb->dev = (void *) hdev;
2390 hdev->reassembly[index] = skb;
2391 }
2392
2393 while (count) {
2394 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002395 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302396
2397 memcpy(skb_put(skb, len), data, len);
2398
2399 count -= len;
2400 data += len;
2401 scb->expect -= len;
2402 remain = count;
2403
2404 switch (type) {
2405 case HCI_EVENT_PKT:
2406 if (skb->len == HCI_EVENT_HDR_SIZE) {
2407 struct hci_event_hdr *h = hci_event_hdr(skb);
2408 scb->expect = h->plen;
2409
2410 if (skb_tailroom(skb) < scb->expect) {
2411 kfree_skb(skb);
2412 hdev->reassembly[index] = NULL;
2413 return -ENOMEM;
2414 }
2415 }
2416 break;
2417
2418 case HCI_ACLDATA_PKT:
2419 if (skb->len == HCI_ACL_HDR_SIZE) {
2420 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2421 scb->expect = __le16_to_cpu(h->dlen);
2422
2423 if (skb_tailroom(skb) < scb->expect) {
2424 kfree_skb(skb);
2425 hdev->reassembly[index] = NULL;
2426 return -ENOMEM;
2427 }
2428 }
2429 break;
2430
2431 case HCI_SCODATA_PKT:
2432 if (skb->len == HCI_SCO_HDR_SIZE) {
2433 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2434 scb->expect = h->dlen;
2435
2436 if (skb_tailroom(skb) < scb->expect) {
2437 kfree_skb(skb);
2438 hdev->reassembly[index] = NULL;
2439 return -ENOMEM;
2440 }
2441 }
2442 break;
2443 }
2444
2445 if (scb->expect == 0) {
2446 /* Complete frame */
2447
2448 bt_cb(skb)->pkt_type = type;
2449 hci_recv_frame(skb);
2450
2451 hdev->reassembly[index] = NULL;
2452 return remain;
2453 }
2454 }
2455
2456 return remain;
2457}
2458
Marcel Holtmannef222012007-07-11 06:42:04 +02002459int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2460{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302461 int rem = 0;
2462
Marcel Holtmannef222012007-07-11 06:42:04 +02002463 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2464 return -EILSEQ;
2465
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002466 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002467 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302468 if (rem < 0)
2469 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002470
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302471 data += (count - rem);
2472 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002473 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002474
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302475 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002476}
2477EXPORT_SYMBOL(hci_recv_fragment);
2478
Suraj Sumangala99811512010-07-14 13:02:19 +05302479#define STREAM_REASSEMBLY 0
2480
2481int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2482{
2483 int type;
2484 int rem = 0;
2485
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002486 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302487 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2488
2489 if (!skb) {
2490 struct { char type; } *pkt;
2491
2492 /* Start of the frame */
2493 pkt = data;
2494 type = pkt->type;
2495
2496 data++;
2497 count--;
2498 } else
2499 type = bt_cb(skb)->pkt_type;
2500
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002501 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002502 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302503 if (rem < 0)
2504 return rem;
2505
2506 data += (count - rem);
2507 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002508 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302509
2510 return rem;
2511}
2512EXPORT_SYMBOL(hci_recv_stream_fragment);
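/* Illustrative sketch (editorial addition): how a hypothetical UART-style
 * driver might feed raw bytes into the reassembly helpers above. With a
 * byte stream the packet-type indicator is in-band, so the stream variant
 * is used; a driver that already knows the packet type would call
 * hci_recv_fragment() instead.
 */
static void __maybe_unused example_uart_rx(struct hci_dev *hdev,
					   const void *buf, int count)
{
	int err = hci_recv_stream_fragment(hdev, (void *) buf, count);

	if (err < 0)
		BT_ERR("%s stream reassembly failed: err %d",
		       hdev->name, err);
}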
2513
Linus Torvalds1da177e2005-04-16 15:20:36 -07002514/* ---- Interface to upper protocols ---- */
2515
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516int hci_register_cb(struct hci_cb *cb)
2517{
2518 BT_DBG("%p name %s", cb, cb->name);
2519
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002520 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002521 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002522 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523
2524 return 0;
2525}
2526EXPORT_SYMBOL(hci_register_cb);
2527
2528int hci_unregister_cb(struct hci_cb *cb)
2529{
2530 BT_DBG("%p name %s", cb, cb->name);
2531
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002532 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002534 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535
2536 return 0;
2537}
2538EXPORT_SYMBOL(hci_unregister_cb);
2539
2540static int hci_send_frame(struct sk_buff *skb)
2541{
2542 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2543
2544 if (!hdev) {
2545 kfree_skb(skb);
2546 return -ENODEV;
2547 }
2548
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002549 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002551 /* Time stamp */
2552 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002553
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002554 /* Send copy to monitor */
2555 hci_send_to_monitor(hdev, skb);
2556
2557 if (atomic_read(&hdev->promisc)) {
2558 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002559 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560 }
2561
2562 /* Get rid of skb owner, prior to sending to the driver. */
2563 skb_orphan(skb);
2564
2565 return hdev->send(skb);
2566}
2567
Johan Hedberg3119ae92013-03-05 20:37:44 +02002568void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2569{
2570 skb_queue_head_init(&req->cmd_q);
2571 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002572 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002573}
2574
2575int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2576{
2577 struct hci_dev *hdev = req->hdev;
2578 struct sk_buff *skb;
2579 unsigned long flags;
2580
2581 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2582
Andre Guedes5d73e032013-03-08 11:20:16 -03002583 /* If an error occurred during request building, remove all HCI
2584 * commands queued on the HCI request queue.
2585 */
2586 if (req->err) {
2587 skb_queue_purge(&req->cmd_q);
2588 return req->err;
2589 }
2590
Johan Hedberg3119ae92013-03-05 20:37:44 +02002591 /* Do not allow empty requests */
2592 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002593 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002594
2595 skb = skb_peek_tail(&req->cmd_q);
2596 bt_cb(skb)->req.complete = complete;
2597
2598 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2599 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2600 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2601
2602 queue_work(hdev->workqueue, &hdev->cmd_work);
2603
2604 return 0;
2605}
2606
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002607static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002608 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002609{
2610 int len = HCI_COMMAND_HDR_SIZE + plen;
2611 struct hci_command_hdr *hdr;
2612 struct sk_buff *skb;
2613
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002615 if (!skb)
2616 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617
2618 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002619 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002620 hdr->plen = plen;
2621
2622 if (plen)
2623 memcpy(skb_put(skb, plen), param, plen);
2624
2625 BT_DBG("skb len %d", skb->len);
2626
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002627 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002628 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002629
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002630 return skb;
2631}
2632
2633/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002634int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2635 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002636{
2637 struct sk_buff *skb;
2638
2639 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2640
2641 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2642 if (!skb) {
2643 BT_ERR("%s no memory for command", hdev->name);
2644 return -ENOMEM;
2645 }
2646
Johan Hedberg11714b32013-03-05 20:37:47 +02002647 /* Stand-alone HCI commands must be flagged as
2648 * single-command requests.
2649 */
2650 bt_cb(skb)->req.start = true;
2651
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002653 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654
2655 return 0;
2656}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657
Johan Hedberg71c76a12013-03-05 20:37:46 +02002658/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002659void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2660 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002661{
2662 struct hci_dev *hdev = req->hdev;
2663 struct sk_buff *skb;
2664
2665 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2666
Andre Guedes34739c12013-03-08 11:20:18 -03002667 /* If an error occurred during request building, there is no point in
2668 * queueing the HCI command. We can simply return.
2669 */
2670 if (req->err)
2671 return;
2672
Johan Hedberg71c76a12013-03-05 20:37:46 +02002673 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2674 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002675 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2676 hdev->name, opcode);
2677 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002678 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002679 }
2680
2681 if (skb_queue_empty(&req->cmd_q))
2682 bt_cb(skb)->req.start = true;
2683
Johan Hedberg02350a72013-04-03 21:50:29 +03002684 bt_cb(skb)->req.event = event;
2685
Johan Hedberg71c76a12013-03-05 20:37:46 +02002686 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002687}
2688
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002689void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2690 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002691{
2692 hci_req_add_ev(req, opcode, plen, param, 0);
2693}
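/* Illustrative sketch (editorial addition): typical use of the request API
 * defined above, mirroring le_scan_disable_work() earlier in this file.
 * The example_* names are hypothetical.
 */
static void example_req_complete(struct hci_dev *hdev, u8 status)
{
	if (status)
		BT_ERR("%s request failed: status %d", hdev->name, status);
}

static int __maybe_unused example_write_scan(struct hci_dev *hdev, u8 scan)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	/* Queued commands are spliced out atomically; example_req_complete
	 * runs once the final command in the request has completed. */
	return hci_req_run(&req, example_req_complete);
}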
2694
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002696void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697{
2698 struct hci_command_hdr *hdr;
2699
2700 if (!hdev->sent_cmd)
2701 return NULL;
2702
2703 hdr = (void *) hdev->sent_cmd->data;
2704
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002705 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706 return NULL;
2707
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002708 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002709
2710 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2711}
2712
2713/* Send ACL data */
2714static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2715{
2716 struct hci_acl_hdr *hdr;
2717 int len = skb->len;
2718
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002719 skb_push(skb, HCI_ACL_HDR_SIZE);
2720 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002721 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002722 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2723 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724}
2725
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002726static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002727 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002729 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 struct hci_dev *hdev = conn->hdev;
2731 struct sk_buff *list;
2732
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002733 skb->len = skb_headlen(skb);
2734 skb->data_len = 0;
2735
2736 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002737
2738 switch (hdev->dev_type) {
2739 case HCI_BREDR:
2740 hci_add_acl_hdr(skb, conn->handle, flags);
2741 break;
2742 case HCI_AMP:
2743 hci_add_acl_hdr(skb, chan->handle, flags);
2744 break;
2745 default:
2746 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2747 return;
2748 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002749
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002750 list = skb_shinfo(skb)->frag_list;
2751 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 /* Non-fragmented */
2753 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2754
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002755 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 } else {
2757 /* Fragmented */
2758 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2759
2760 skb_shinfo(skb)->frag_list = NULL;
2761
2762 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002763 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002765 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002766
2767 flags &= ~ACL_START;
2768 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 do {
2770 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002771
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002773 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002774 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775
2776 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2777
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002778 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 } while (list);
2780
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002781 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002783}
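
/* For a fragmented skb the head keeps the caller's flags (normally
 * ACL_START) while every fragment from the frag_list is re-flagged as
 * ACL_CONT, and the whole chain is queued under the queue lock so that
 * fragments of two different PDUs can never interleave on the wire.
 */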

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = chan->conn->hdev;

        BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

        skb->dev = (void *) hdev;

        hci_queue_acl(chan, &chan->data_q, skb, flags);

        queue_work(hdev->workqueue, &hdev->tx_work);
}
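
/* L2CAP is the typical caller; given the hci_chan it owns, sending a
 * PDU looks roughly like this (sketch; the flag choice depends on
 * whether the skb starts a PDU and whether it may be flushed):
 *
 *      hci_send_acl(conn->hchan, skb, ACL_START);
 *
 * The frame itself leaves the host later, from hci_tx_work().
 */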

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
                                     int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL, *c;
        unsigned int num = 0, min = ~0;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */

        rcu_read_lock();

        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min = c->sent;
                        conn = c;
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();

        if (conn) {
                int cnt, q;

                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
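
/* hci_low_sent() picks, per link type, the connection with the fewest
 * unacknowledged packets and grants it a fair share of the free
 * controller buffers: with acl_cnt == 10 and three busy ACL connections
 * the winner gets a quote of 10 / 3 = 3 frames, and the quote never
 * drops below 1, so a busy connection is never starved outright.
 */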

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *c;

        BT_ERR("%s link tx timeout", hdev->name);

        rcu_read_lock();

        /* Kill stalled connections */
        list_for_each_entry_rcu(c, &h->list, list) {
                if (c->type == type && c->sent) {
                        BT_ERR("%s killing stalled connection %pMR",
                               hdev->name, &c->dst);
                        hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
                }
        }

        rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
                                      int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_chan *chan = NULL;
        unsigned int num = 0, min = ~0, cur_prio = 0;
        struct hci_conn *conn;
        int cnt, q, conn_num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *tmp;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                conn_num++;

                list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (skb_queue_empty(&tmp->data_q))
                                continue;

                        skb = skb_peek(&tmp->data_q);
                        if (skb->priority < cur_prio)
                                continue;

                        if (skb->priority > cur_prio) {
                                num = 0;
                                min = ~0;
                                cur_prio = skb->priority;
                        }

                        num++;

                        if (conn->sent < min) {
                                min = conn->sent;
                                chan = tmp;
                        }
                }

                if (hci_conn_num(hdev, type) == conn_num)
                        break;
        }

        rcu_read_unlock();

        if (!chan)
                return NULL;

        switch (chan->conn->type) {
        case ACL_LINK:
                cnt = hdev->acl_cnt;
                break;
        case AMP_LINK:
                cnt = hdev->block_cnt;
                break;
        case SCO_LINK:
        case ESCO_LINK:
                cnt = hdev->sco_cnt;
                break;
        case LE_LINK:
                cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                break;
        default:
                cnt = 0;
                BT_ERR("Unknown link type");
        }

        q = cnt / num;
        *quote = q ? q : 1;
        BT_DBG("chan %p quote %d", chan, *quote);
        return chan;
}
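
/* hci_chan_sent() is the channel-level, priority-aware variant of the
 * scheduler: only channels whose queued head skb carries the highest
 * priority seen in this pass compete, and among those the one on the
 * connection with the fewest outstanding packets wins. The quote is
 * still derived from the shared per-link-type buffer count.
 */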

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn;
        int num = 0;

        BT_DBG("%s", hdev->name);

        rcu_read_lock();

        list_for_each_entry_rcu(conn, &h->list, list) {
                struct hci_chan *chan;

                if (conn->type != type)
                        continue;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        continue;

                num++;

                list_for_each_entry_rcu(chan, &conn->chan_list, list) {
                        struct sk_buff *skb;

                        if (chan->sent) {
                                chan->sent = 0;
                                continue;
                        }

                        if (skb_queue_empty(&chan->data_q))
                                continue;

                        skb = skb_peek(&chan->data_q);
                        if (skb->priority >= HCI_PRIO_MAX - 1)
                                continue;

                        skb->priority = HCI_PRIO_MAX - 1;

                        BT_DBG("chan %p skb %p promoted to %d", chan, skb,
                               skb->priority);
                }

                if (hci_conn_num(hdev, type) == num)
                        break;
        }

        rcu_read_unlock();
}
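
/* This promotion is the anti-starvation half of the scheme above: after
 * a TX round, every channel that sent nothing but still has data queued
 * gets its head skb bumped to HCI_PRIO_MAX - 1, so it can
 * out-prioritize the previously busy channels in the next round.
 */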

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
        /* Calculate count of blocks used by this packet */
        return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
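
/* The ACL header itself is not counted: with block_len == 256, an skb
 * carrying the 4-byte ACL header plus 672 bytes of payload costs
 * DIV_ROUND_UP(672, 256) = 3 controller blocks.
 */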

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!cnt && time_after(jiffies, hdev->acl_last_tx +
                                       HCI_ACL_TX_TIMEOUT))
                        hci_link_tx_to(hdev, ACL_LINK);
        }
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->acl_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;

        __check_timeout(hdev, cnt);

        while (hdev->acl_cnt &&
               (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (cnt != hdev->acl_cnt)
                hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
        unsigned int cnt = hdev->block_cnt;
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote;
        u8 type;

        __check_timeout(hdev, cnt);

        BT_DBG("%s", hdev->name);

        if (hdev->dev_type == HCI_AMP)
                type = AMP_LINK;
        else
                type = ACL_LINK;

        while (hdev->block_cnt > 0 &&
               (chan = hci_chan_sent(hdev, type, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
                        int blocks;

                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        blocks = __get_blocks(hdev, skb);
                        if (blocks > hdev->block_cnt)
                                return;

                        hci_conn_enter_active_mode(chan->conn,
                                                   bt_cb(skb)->force_active);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->block_cnt -= blocks;
                        quote -= blocks;

                        chan->sent += blocks;
                        chan->conn->sent += blocks;
                }
        }

        if (cnt != hdev->block_cnt)
                hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        /* No ACL link over BR/EDR controller */
        if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
                return;

        /* No AMP link over AMP controller */
        if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
                return;

        switch (hdev->flow_ctl_mode) {
        case HCI_FLOW_CTL_MODE_PACKET_BASED:
                hci_sched_acl_pkt(hdev);
                break;

        case HCI_FLOW_CTL_MODE_BLOCK_BASED:
                hci_sched_acl_blk(hdev);
                break;
        }
}
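
/* Which scheduler runs depends on the flow control mode the controller
 * uses: packet-based accounting (one credit per ACL packet, the BR/EDR
 * default) or the block-based accounting of AMP controllers, where a
 * credit is a data block of block_len bytes.
 */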

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, SCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, ESCO_LINK))
                return;

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
                                                     &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_sched_le(struct hci_dev *hdev)
{
        struct hci_chan *chan;
        struct sk_buff *skb;
        int quote, cnt, tmp;

        BT_DBG("%s", hdev->name);

        if (!hci_conn_num(hdev, LE_LINK))
                return;

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* LE tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->le_cnt && hdev->le_pkts &&
                    time_after(jiffies, hdev->le_last_tx + HZ * 45))
                        hci_link_tx_to(hdev, LE_LINK);
        }

        cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
        tmp = cnt;
        while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
                u32 priority = (skb_peek(&chan->data_q))->priority;
                while (quote-- && (skb = skb_peek(&chan->data_q))) {
                        BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
                               skb->len, skb->priority);

                        /* Stop if priority has changed */
                        if (skb->priority < priority)
                                break;

                        skb = skb_dequeue(&chan->data_q);

                        hci_send_frame(skb);
                        hdev->le_last_tx = jiffies;

                        cnt--;
                        chan->sent++;
                        chan->conn->sent++;
                }
        }

        if (hdev->le_pkts)
                hdev->le_cnt = cnt;
        else
                hdev->acl_cnt = cnt;

        if (cnt != tmp)
                hci_prio_recalculate(hdev, LE_LINK);
}
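
/* Controllers without a dedicated LE buffer pool report zero LE buffers
 * (le_pkts == 0); LE traffic is then accounted against the ACL pool,
 * which is why the leftover count is written back to either le_cnt or
 * acl_cnt above.
 */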

static void hci_tx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
        struct sk_buff *skb;

        BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
               hdev->sco_cnt, hdev->le_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        hci_sched_le(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
               handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

                /* Send to upper protocol */
                l2cap_recv_acldata(conn, skb, flags);
                return;
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                /* Send to upper protocol */
                sco_recv_scodata(conn, skb);
                return;
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                       hdev->name, handle);
        }

        kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
        struct sk_buff *skb;

        skb = skb_peek(&hdev->cmd_q);
        if (!skb)
                return true;

        return bt_cb(skb)->req.start;
}
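
/* bt_cb(skb)->req.start marks the first command of a request, so an
 * empty command queue, or one whose head starts a new request, means
 * the current request has been fully sent.
 */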

static void hci_resend_last(struct hci_dev *hdev)
{
        struct hci_command_hdr *sent;
        struct sk_buff *skb;
        u16 opcode;

        if (!hdev->sent_cmd)
                return;

        sent = (void *) hdev->sent_cmd->data;
        opcode = __le16_to_cpu(sent->opcode);
        if (opcode == HCI_OP_RESET)
                return;

        skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
        if (!skb)
                return;

        skb_queue_head(&hdev->cmd_q, skb);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
        hci_req_complete_t req_complete = NULL;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

        /* If the completed command doesn't match the last one that was
         * sent we need to do special handling of it.
         */
        if (!hci_sent_cmd_data(hdev, opcode)) {
                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */
                if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
                        hci_resend_last(hdev);

                return;
        }

        /* If the command succeeded and there are still more commands in
         * this request, the request is not yet complete.
         */
        if (!status && !hci_req_is_complete(hdev))
                return;

        /* If this was the last command in a request the complete
         * callback would be found in hdev->sent_cmd instead of the
         * command queue (hdev->cmd_q).
         */
        if (hdev->sent_cmd) {
                req_complete = bt_cb(hdev->sent_cmd)->req.complete;

                if (req_complete) {
                        /* We must set the complete callback to NULL to
                         * avoid calling the callback more than once if
                         * this function gets called again.
                         */
                        bt_cb(hdev->sent_cmd)->req.complete = NULL;

                        goto call_complete;
                }
        }

        /* Remove all pending commands belonging to this request */
        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        while ((skb = __skb_dequeue(&hdev->cmd_q))) {
                if (bt_cb(skb)->req.start) {
                        __skb_queue_head(&hdev->cmd_q, skb);
                        break;
                }

                req_complete = bt_cb(skb)->req.complete;
                kfree_skb(skb);
        }
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
        if (req_complete)
                req_complete(hdev, status);
}
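
/* For reference, a request whose completion ends up here is typically
 * built and fired along these lines (sketch, assuming the
 * hci_req_init()/hci_req_run() helpers of this framework; the callback
 * name is illustrative):
 *
 *      static void example_complete(struct hci_dev *hdev, u8 status)
 *      {
 *              BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *      }
 *
 *      struct hci_request req;
 *
 *      hci_req_init(&req, hdev);
 *      hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *      hci_req_run(&req, example_complete);
 */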

static void hci_rx_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                /* Send copy to monitor */
                hci_send_to_monitor(hdev, skb);

                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in these states. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        BT_DBG("%s Event packet", hdev->name);
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }
}

static void hci_cmd_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
        struct sk_buff *skb;

        BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
               atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt)) {
                skb = skb_dequeue(&hdev->cmd_q);
                if (!skb)
                        return;

                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        if (test_bit(HCI_RESET, &hdev->flags))
                                del_timer(&hdev->cmd_timer);
                        else
                                mod_timer(&hdev->cmd_timer,
                                          jiffies + HCI_CMD_TIMEOUT);
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }
        }
}
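
/* The controller advertises its command credit in the
 * Num_HCI_Command_Packets field of Command Complete/Status events;
 * cmd_cnt mirrors that credit (commonly 1), and cmd_timer catches
 * commands the controller never answers.
 */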

u8 bdaddr_to_le(u8 bdaddr_type)
{
        switch (bdaddr_type) {
        case BDADDR_LE_PUBLIC:
                return ADDR_LE_DEV_PUBLIC;

        default:
                /* Fallback to LE Random address type */
                return ADDR_LE_DEV_RANDOM;
        }
}