blob: db7de80b88a2758ea25a630c3209f87d08faff7c [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
Sasha Levin3df92b32012-05-27 22:36:56 +020048/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Notify the HCI socket layer of a device event (e.g. up/down/register).
 * Thin forwarder kept so callers have a single notification entry point.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Completion callback for synchronous HCI requests: record the result and
 * wake the thread sleeping in __hci_req_sync()/__hci_cmd_sync_ev().
 * Only acts if a request is actually pending, so a late/duplicate
 * completion cannot clobber state.
 */
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
70
/* Abort a pending synchronous request with @err (a negative errno is
 * derived by the waiter from req_result).  Mirrors
 * hci_req_sync_complete() but marks the request HCI_REQ_CANCELED.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
81
/* Consume the last received HCI event stashed in hdev->recv_evt and hand
 * it to the caller if it matches expectations.
 *
 * If @event is non-zero the stashed event must carry exactly that event
 * code.  If @event is zero the stashed event must be a Command Complete
 * for @opcode.  On any mismatch or malformed event the skb is freed and
 * ERR_PTR(-ENODATA) is returned; on success ownership of the skb passes
 * to the caller (with the header(s) already pulled off).
 */
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	/* Detach the stashed event under the lock so no one else can
	 * observe or consume it concurrently.
	 */
	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	/* Caller asked for a specific event code, not a Command Complete */
	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}
136
/* Send one HCI command and sleep until the matching event arrives.
 *
 * @event: event code to wait for, or 0 to wait for the Command Complete
 *         of @opcode.
 * @timeout: sleep budget in jiffies.
 *
 * Must be called from sleepable context.  Returns the resulting event
 * skb (caller owns it) or an ERR_PTR on queueing failure, signal
 * (-EINTR), timeout (-ETIMEDOUT) or controller error status.
 */
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	/* Must be set before hci_req_run() so a fast completion from
	 * hci_req_sync_complete() is not lost.
	 */
	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the controller never answered */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);
190
/* Convenience wrapper around __hci_cmd_sync_ev() that waits for the
 * Command Complete event of @opcode (event == 0).
 */
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
197
/* Execute request and wait for completion.
 *
 * @func builds the request (may add zero or more commands); the caller
 * must already hold the request lock (see hci_req_sync()).  Returns 0 on
 * success or a negative errno on queueing failure, signal, timeout or
 * controller error status.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Set before running the request so the completion callback
	 * (hci_req_sync_complete) finds a pending request.
	 */
	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: no completion within the timeout */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
261
/* Public entry point for synchronous requests: rejects requests while
 * the device is down and serializes all requests via the request lock
 * before delegating to __hci_req_sync().
 */
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
279
Johan Hedberg42c6b122013-03-05 20:37:49 +0200280static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700281{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200282 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700283
284 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200285 set_bit(HCI_RESET, &req->hdev->flags);
286 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700287}
288
Johan Hedberg42c6b122013-03-05 20:37:49 +0200289static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200291 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200292
Linus Torvalds1da177e2005-04-16 15:20:36 -0700293 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200294 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700295
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200296 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200297 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200298
299 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200300 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700301}
302
Johan Hedberg42c6b122013-03-05 20:37:49 +0200303static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200304{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200305 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200306
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200307 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200308 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300309
310 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200311 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300312
313 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200314 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200315}
316
/* First init stage, common to all controller types: optional reset
 * followed by type-specific identity reads.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset (skipped when the driver resets on close already) */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
341
/* Stage-two BR/EDR setup: baseline reads plus event filter and
 * connection-accept-timeout defaults.
 */
static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters (1.2+ controllers only) */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
373
/* Stage-two LE setup: read LE capabilities and, for single-mode LE
 * controllers, mark LE as implicitly enabled.
 */
static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}
397
398static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
399{
400 if (lmp_ext_inq_capable(hdev))
401 return 0x02;
402
403 if (lmp_inq_rssi_capable(hdev))
404 return 0x01;
405
406 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
407 hdev->lmp_subver == 0x0757)
408 return 0x01;
409
410 if (hdev->manufacturer == 15) {
411 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
412 return 0x01;
413 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
414 return 0x01;
415 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
416 return 0x01;
417 }
418
419 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
420 hdev->lmp_subver == 0x1805)
421 return 0x01;
422
423 return 0x00;
424}
425
Johan Hedberg42c6b122013-03-05 20:37:49 +0200426static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200427{
428 u8 mode;
429
Johan Hedberg42c6b122013-03-05 20:37:49 +0200430 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200431
Johan Hedberg42c6b122013-03-05 20:37:49 +0200432 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200433}
434
/* Build and queue the Set Event Mask (and, if LE capable, LE Set Event
 * Mask) commands.  Events are only unmasked when the corresponding LMP
 * feature is supported, so the controller never reports events the host
 * cannot have triggered.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* LE event mask: reuse the buffer; 0x1f unmasks the low
		 * five LE meta-event subtypes.
		 */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
503
/* Second init stage: host configuration that depends on the features
 * read during stage one (BR/EDR setup, LE setup, event mask, SSP/EIR,
 * inquiry mode, extended features, link-level security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* Read Local Supported Commands only on 1.2+ controllers */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			/* SSP disabled: clear any stale EIR data */
			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
554
Johan Hedberg42c6b122013-03-05 20:37:49 +0200555static void hci_setup_link_policy(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200556{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200557 struct hci_dev *hdev = req->hdev;
Johan Hedberg2177bab2013-03-05 20:37:43 +0200558 struct hci_cp_write_def_link_policy cp;
559 u16 link_policy = 0;
560
561 if (lmp_rswitch_capable(hdev))
562 link_policy |= HCI_LP_RSWITCH;
563 if (lmp_hold_capable(hdev))
564 link_policy |= HCI_LP_HOLD;
565 if (lmp_sniff_capable(hdev))
566 link_policy |= HCI_LP_SNIFF;
567 if (lmp_park_capable(hdev))
568 link_policy |= HCI_LP_PARK;
569
570 cp.policy = cpu_to_le16(link_policy);
Johan Hedberg42c6b122013-03-05 20:37:49 +0200571 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200572}
573
/* Sync the controller's LE Host Supported setting with the host's
 * HCI_LE_ENABLED flag.  Skipped entirely on LE-only controllers and
 * when the controller already matches the desired state.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	/* Only write when the controller's current value differs */
	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
594
/* Third init stage: optional commands gated on the supported-commands
 * bitmap read in stage two, LE host support, and extended feature
 * pages beyond page 1.
 */
static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Only send HCI_Delete_Stored_Link_Key if it is supported */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	/* Write Default Link Policy only if supported (commands[5] bit 4) */
	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}
627
/* Run the three-stage controller initialization synchronously.
 * Returns 0 on success or the first stage's negative errno.
 */
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
649
Johan Hedberg42c6b122013-03-05 20:37:49 +0200650static void hci_scan_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651{
652 __u8 scan = opt;
653
Johan Hedberg42c6b122013-03-05 20:37:49 +0200654 BT_DBG("%s %x", req->hdev->name, scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700655
656 /* Inquiry and Page scans */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200657 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700658}
659
Johan Hedberg42c6b122013-03-05 20:37:49 +0200660static void hci_auth_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700661{
662 __u8 auth = opt;
663
Johan Hedberg42c6b122013-03-05 20:37:49 +0200664 BT_DBG("%s %x", req->hdev->name, auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665
666 /* Authentication */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200667 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700668}
669
Johan Hedberg42c6b122013-03-05 20:37:49 +0200670static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700671{
672 __u8 encrypt = opt;
673
Johan Hedberg42c6b122013-03-05 20:37:49 +0200674 BT_DBG("%s %x", req->hdev->name, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200676 /* Encryption */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200677 hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678}
679
Johan Hedberg42c6b122013-03-05 20:37:49 +0200680static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200681{
682 __le16 policy = cpu_to_le16(opt);
683
Johan Hedberg42c6b122013-03-05 20:37:49 +0200684 BT_DBG("%s %x", req->hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200685
686 /* Default link policy */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200687 hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200688}
689
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900690/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700691 * Device is held on return. */
692struct hci_dev *hci_dev_get(int index)
693{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200694 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695
696 BT_DBG("%d", index);
697
698 if (index < 0)
699 return NULL;
700
701 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200702 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703 if (d->id == index) {
704 hdev = hci_dev_hold(d);
705 break;
706 }
707 }
708 read_unlock(&hci_dev_list_lock);
709 return hdev;
710}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711
712/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200713
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200714bool hci_discovery_active(struct hci_dev *hdev)
715{
716 struct discovery_state *discov = &hdev->discovery;
717
Andre Guedes6fbe1952012-02-03 17:47:58 -0300718 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300719 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300720 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200721 return true;
722
Andre Guedes6fbe1952012-02-03 17:47:58 -0300723 default:
724 return false;
725 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200726}
727
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the edges userspace cares about.  No-op if the state does
 * not change.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so userspace was never told it started.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
753
/* Free every entry in the inquiry cache and reset the unknown/resolve
 * sublists.  Entries live only on the "all" list allocation-wise, so
 * freeing that list releases everything; the sublists are then
 * reinitialized rather than walked.
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
767
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300768struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
769 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770{
Johan Hedberg30883512012-01-04 14:16:21 +0200771 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700772 struct inquiry_entry *e;
773
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300774 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775
Johan Hedberg561aafb2012-01-04 13:31:59 +0200776 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700777 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200778 return e;
779 }
780
781 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782}
783
Johan Hedberg561aafb2012-01-04 13:31:59 +0200784struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300785 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200786{
Johan Hedberg30883512012-01-04 14:16:21 +0200787 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200788 struct inquiry_entry *e;
789
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300790 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200791
792 list_for_each_entry(e, &cache->unknown, list) {
793 if (!bacmp(&e->data.bdaddr, bdaddr))
794 return e;
795 }
796
797 return NULL;
798}
799
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200800struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300801 bdaddr_t *bdaddr,
802 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200803{
804 struct discovery_state *cache = &hdev->discovery;
805 struct inquiry_entry *e;
806
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300807 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200808
809 list_for_each_entry(e, &cache->resolve, list) {
810 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
811 return e;
812 if (!bacmp(&e->data.bdaddr, bdaddr))
813 return e;
814 }
815
816 return NULL;
817}
818
/* Re-position @ie on the resolve list after its RSSI changed, keeping
 * the list ordered so that entries with stronger signal (smaller
 * abs(rssi)) come first; entries already in NAME_PENDING state are
 * never displaced.  The list order determines which name gets resolved
 * next.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Take the entry off the list, then find its new insertion point */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		/* Stop before the first non-pending entry with weaker or
		 * equal signal; pos trails p so the insert lands in front
		 * of it.
		 */
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
837
/* Insert or refresh an inquiry cache entry for the device described by
 * @data.  @name_known tells us whether the remote name is already
 * resolved; *@ssp (if non-NULL) is set to whether the device supports
 * SSP.  Returns true when the entry's name is known (no name request
 * needed), false when the name still has to be resolved or allocation
 * failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry response invalidates any stored OOB data */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Remember SSP support seen in an earlier response */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while a name request is still needed:
		 * re-sort the resolve list so stronger devices go first.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known: drop the entry from the unknown/resolve list */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
895
896static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
897{
Johan Hedberg30883512012-01-04 14:16:21 +0200898 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899 struct inquiry_info *info = (struct inquiry_info *) buf;
900 struct inquiry_entry *e;
901 int copied = 0;
902
Johan Hedberg561aafb2012-01-04 13:31:59 +0200903 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200905
906 if (copied >= num)
907 break;
908
Linus Torvalds1da177e2005-04-16 15:20:36 -0700909 bacpy(&info->bdaddr, &data->bdaddr);
910 info->pscan_rep_mode = data->pscan_rep_mode;
911 info->pscan_period_mode = data->pscan_period_mode;
912 info->pscan_mode = data->pscan_mode;
913 memcpy(info->dev_class, data->dev_class, 3);
914 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200915
Linus Torvalds1da177e2005-04-16 15:20:36 -0700916 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200917 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700918 }
919
920 BT_DBG("cache %p, copied %d", cache, copied);
921 return copied;
922}
923
/* Request builder: queue an HCI Inquiry command built from the
 * hci_inquiry_req passed (cast through @opt) by hci_inquiry().  Does
 * nothing if an inquiry is already running.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
941
/* Action callback for wait_on_bit() in hci_inquiry(): sleep until the
 * bit waitqueue wakes us, and report whether a signal interrupted the
 * wait (non-zero aborts the wait_on_bit loop).
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
947
Linus Torvalds1da177e2005-04-16 15:20:36 -0700948int hci_inquiry(void __user *arg)
949{
950 __u8 __user *ptr = arg;
951 struct hci_inquiry_req ir;
952 struct hci_dev *hdev;
953 int err = 0, do_inquiry = 0, max_rsp;
954 long timeo;
955 __u8 *buf;
956
957 if (copy_from_user(&ir, ptr, sizeof(ir)))
958 return -EFAULT;
959
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200960 hdev = hci_dev_get(ir.dev_id);
961 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962 return -ENODEV;
963
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300964 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900965 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300966 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967 inquiry_cache_flush(hdev);
968 do_inquiry = 1;
969 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300970 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971
Marcel Holtmann04837f62006-07-03 10:02:33 +0200972 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200973
974 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +0200975 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
976 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200977 if (err < 0)
978 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -0300979
980 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
981 * cleared). If it is interrupted by a signal, return -EINTR.
982 */
983 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
984 TASK_INTERRUPTIBLE))
985 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200986 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300988 /* for unlimited number of responses we will use buffer with
989 * 255 entries
990 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700991 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
992
993 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
994 * copy it to the user space.
995 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100996 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200997 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700998 err = -ENOMEM;
999 goto done;
1000 }
1001
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001002 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001003 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001004 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005
1006 BT_DBG("num_rsp %d", ir.num_rsp);
1007
1008 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
1009 ptr += sizeof(ir);
1010 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001011 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001013 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001014 err = -EFAULT;
1015
1016 kfree(buf);
1017
1018done:
1019 hci_dev_put(hdev);
1020 return err;
1021}
1022
/* Assemble the LE advertising data payload into @ptr: an optional
 * Flags field, an optional TX power field, and the (possibly
 * shortened) local name.  Returns the total number of bytes written
 * (at most HCI_MAX_AD_LENGTH).
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	/* Each AD structure is: length byte, type byte, payload */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Two bytes of this AD structure are length + type */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		/* Truncated name is tagged EIR_NAME_SHORT instead of
		 * EIR_NAME_COMPLETE.
		 */
		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* Length byte covers the type byte plus the name */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
1080
/* Queue an LE Set Advertising Data command on @req if the freshly
 * built advertising payload differs from what the controller already
 * has.  No-op for controllers without LE support or when nothing
 * changed.
 */
void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	/* Zero the whole command so unused tail bytes are deterministic */
	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Skip the command if the controller already has this payload */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}
1105
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106/* ---- HCI ioctl helpers ---- */
1107
/* Power on HCI device @dev: open the transport, run the vendor setup
 * and the HCI init sequence, and notify listeners.  On any init
 * failure the partially started device is fully torn down again.
 *
 * Returns 0 on success or a negative error (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init-sequence error).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered: refuse to bring it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Radio is soft-blocked via rfkill */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	/* Driver-specific setup runs only during initial device setup */
	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		/* Raw devices skip the HCI init command sequence */
		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1199
/* Bring the device down: cancel pending work, flush queues, optionally
 * send a final HCI reset, close the transport and clear volatile
 * state.  Counterpart of hci_dev_open(); shared by hci_dev_close(),
 * hci_power_off() and the rfkill handler.  Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A discoverable timeout in flight must not fire after close */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* kfree_skb() handles NULL, so no guard is needed here */
	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1296
1297int hci_dev_close(__u16 dev)
1298{
1299 struct hci_dev *hdev;
1300 int err;
1301
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001302 hdev = hci_dev_get(dev);
1303 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001304 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001305
1306 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1307 cancel_delayed_work(&hdev->power_off);
1308
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001310
Linus Torvalds1da177e2005-04-16 15:20:36 -07001311 hci_dev_put(hdev);
1312 return err;
1313}
1314
/* Handle the HCIDEVRESET ioctl: drop queued traffic, flush the inquiry
 * cache and connection hash, reset flow-control counters and (for
 * non-raw devices) issue an HCI reset.  A device that is not up is
 * silently left alone (returns 0).
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and zero per-link-type packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1352
1353int hci_dev_reset_stat(__u16 dev)
1354{
1355 struct hci_dev *hdev;
1356 int ret = 0;
1357
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001358 hdev = hci_dev_get(dev);
1359 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 return -ENODEV;
1361
1362 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1363
1364 hci_dev_put(hdev);
1365
1366 return ret;
1367}
1368
/* Dispatch the per-device configuration ioctls (HCISETAUTH,
 * HCISETENCRYPT, HCISETSCAN, ...).  @arg points to a user-space
 * struct hci_dev_req whose dev_opt carries the command parameter.
 * Returns 0 or a negative error.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt packs two 16-bit values:
	 * low word = packet count, high word = MTU.
	 */
	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1443
/* Handle the HCIGETDEVLIST ioctl: copy up to dev_num (read from user
 * space) registered device ids and their flag words back to @arg.
 * Returns 0 or a negative error (-EFAULT, -EINVAL, -ENOMEM).
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the temporary buffer stays small */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device counts as user activity: keep it
		 * from auto-powering off.
		 */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) user space expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1490
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info with the
 * device's address, type, flags, MTUs and statistics, and copy it back
 * to user space.  Returns 0 or a negative error (-EFAULT, -ENODEV).
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as user activity: cancel auto-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) user space expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	/* LE-only controllers report their LE MTUs in the ACL fields */
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1539
1540/* ---- Interface to HCI drivers ---- */
1541
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001542static int hci_rfkill_set_block(void *data, bool blocked)
1543{
1544 struct hci_dev *hdev = data;
1545
1546 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1547
1548 if (!blocked)
1549 return 0;
1550
1551 hci_dev_do_close(hdev);
1552
1553 return 0;
1554}
1555
/* rfkill operations registered for each HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1559
/* Deferred power-on work: open the device, arm the auto-power-off
 * timer when the device was only opened automatically, and announce a
 * newly set-up controller to the management interface.  A failed open
 * is reported via mgmt_set_powered_failed().
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1580
1581static void hci_power_off(struct work_struct *work)
1582{
Johan Hedberg32435532011-11-07 22:16:04 +02001583 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001584 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001585
1586 BT_DBG("%s", hdev->name);
1587
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001588 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001589}
1590
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001591static void hci_discov_off(struct work_struct *work)
1592{
1593 struct hci_dev *hdev;
1594 u8 scan = SCAN_PAGE;
1595
1596 hdev = container_of(work, struct hci_dev, discov_off.work);
1597
1598 BT_DBG("%s", hdev->name);
1599
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001600 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001601
1602 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1603
1604 hdev->discov_timeout = 0;
1605
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001606 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001607}
1608
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001609int hci_uuids_clear(struct hci_dev *hdev)
1610{
Johan Hedberg48210022013-01-27 00:31:28 +02001611 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001612
Johan Hedberg48210022013-01-27 00:31:28 +02001613 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1614 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001615 kfree(uuid);
1616 }
1617
1618 return 0;
1619}
1620
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001621int hci_link_keys_clear(struct hci_dev *hdev)
1622{
1623 struct list_head *p, *n;
1624
1625 list_for_each_safe(p, n, &hdev->link_keys) {
1626 struct link_key *key;
1627
1628 key = list_entry(p, struct link_key, list);
1629
1630 list_del(p);
1631 kfree(key);
1632 }
1633
1634 return 0;
1635}
1636
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001637int hci_smp_ltks_clear(struct hci_dev *hdev)
1638{
1639 struct smp_ltk *k, *tmp;
1640
1641 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1642 list_del(&k->list);
1643 kfree(k);
1644 }
1645
1646 return 0;
1647}
1648
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001649struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1650{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001651 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001652
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001653 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001654 if (bacmp(bdaddr, &k->bdaddr) == 0)
1655 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001656
1657 return NULL;
1658}
1659
/* Decide whether a freshly created link key should be kept after the
 * connection goes away (true) or flushed (false). The checks are
 * ordered: insecure key types are rejected before the bonding
 * requirements of the connection are considered.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1695
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001696struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001697{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001698 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001699
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001700 list_for_each_entry(k, &hdev->long_term_keys, list) {
1701 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001702 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001703 continue;
1704
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001705 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001706 }
1707
1708 return NULL;
1709}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001710
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001711struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001712 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001713{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001714 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001715
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001716 list_for_each_entry(k, &hdev->long_term_keys, list)
1717 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001718 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001719 return k;
1720
1721 return NULL;
1722}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001723
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL (security mode 3 style pairing). @new_key controls
 * whether the mgmt layer is notified and the persistence decision made.
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1776
/* Store (or update) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK type bits are accepted; other types are silently
 * ignored (return 0). mgmt is notified only for new LTKs. Returns 0 on
 * success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this identity if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1813
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001814int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1815{
1816 struct link_key *key;
1817
1818 key = hci_find_link_key(hdev, bdaddr);
1819 if (!key)
1820 return -ENOENT;
1821
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001822 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001823
1824 list_del(&key->list);
1825 kfree(key);
1826
1827 return 0;
1828}
1829
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001830int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1831{
1832 struct smp_ltk *k, *tmp;
1833
1834 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1835 if (bacmp(bdaddr, &k->bdaddr))
1836 continue;
1837
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001838 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001839
1840 list_del(&k->list);
1841 kfree(k);
1842 }
1843
1844 return 0;
1845}
1846
Ville Tervo6bd32322011-02-16 16:32:41 +02001847/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001848static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001849{
1850 struct hci_dev *hdev = (void *) arg;
1851
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001852 if (hdev->sent_cmd) {
1853 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1854 u16 opcode = __le16_to_cpu(sent->opcode);
1855
1856 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1857 } else {
1858 BT_ERR("%s command tx timeout", hdev->name);
1859 }
1860
Ville Tervo6bd32322011-02-16 16:32:41 +02001861 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001862 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001863}
1864
Szymon Janc2763eda2011-03-22 13:12:22 +01001865struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001866 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001867{
1868 struct oob_data *data;
1869
1870 list_for_each_entry(data, &hdev->remote_oob_data, list)
1871 if (bacmp(bdaddr, &data->bdaddr) == 0)
1872 return data;
1873
1874 return NULL;
1875}
1876
1877int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1878{
1879 struct oob_data *data;
1880
1881 data = hci_find_remote_oob_data(hdev, bdaddr);
1882 if (!data)
1883 return -ENOENT;
1884
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001885 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001886
1887 list_del(&data->list);
1888 kfree(data);
1889
1890 return 0;
1891}
1892
1893int hci_remote_oob_data_clear(struct hci_dev *hdev)
1894{
1895 struct oob_data *data, *n;
1896
1897 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1898 list_del(&data->list);
1899 kfree(data);
1900 }
1901
1902 return 0;
1903}
1904
1905int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001906 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001907{
1908 struct oob_data *data;
1909
1910 data = hci_find_remote_oob_data(hdev, bdaddr);
1911
1912 if (!data) {
1913 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1914 if (!data)
1915 return -ENOMEM;
1916
1917 bacpy(&data->bdaddr, bdaddr);
1918 list_add(&data->list, &hdev->remote_oob_data);
1919 }
1920
1921 memcpy(data->hash, hash, sizeof(data->hash));
1922 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1923
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001924 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001925
1926 return 0;
1927}
1928
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001929struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001930{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001931 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001932
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001933 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001934 if (bacmp(bdaddr, &b->bdaddr) == 0)
1935 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001936
1937 return NULL;
1938}
1939
1940int hci_blacklist_clear(struct hci_dev *hdev)
1941{
1942 struct list_head *p, *n;
1943
1944 list_for_each_safe(p, n, &hdev->blacklist) {
1945 struct bdaddr_list *b;
1946
1947 b = list_entry(p, struct bdaddr_list, list);
1948
1949 list_del(p);
1950 kfree(b);
1951 }
1952
1953 return 0;
1954}
1955
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001956int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001957{
1958 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001959
1960 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1961 return -EBADF;
1962
Antti Julku5e762442011-08-25 16:48:02 +03001963 if (hci_blacklist_lookup(hdev, bdaddr))
1964 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001965
1966 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001967 if (!entry)
1968 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001969
1970 bacpy(&entry->bdaddr, bdaddr);
1971
1972 list_add(&entry->list, &hdev->blacklist);
1973
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001974 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001975}
1976
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001977int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001978{
1979 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001980
Szymon Janc1ec918c2011-11-16 09:32:21 +01001981 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001982 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001983
1984 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001985 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001986 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001987
1988 list_del(&entry->list);
1989 kfree(entry);
1990
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001991 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001992}
1993
Johan Hedberg42c6b122013-03-05 20:37:49 +02001994static void le_scan_param_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001995{
1996 struct le_scan_params *param = (struct le_scan_params *) opt;
1997 struct hci_cp_le_set_scan_param cp;
1998
1999 memset(&cp, 0, sizeof(cp));
2000 cp.type = param->type;
2001 cp.interval = cpu_to_le16(param->interval);
2002 cp.window = cpu_to_le16(param->window);
2003
Johan Hedberg42c6b122013-03-05 20:37:49 +02002004 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002005}
2006
Johan Hedberg42c6b122013-03-05 20:37:49 +02002007static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002008{
2009 struct hci_cp_le_set_scan_enable cp;
2010
2011 memset(&cp, 0, sizeof(cp));
Andre Guedes76a388b2013-04-04 20:21:02 -03002012 cp.enable = LE_SCAN_ENABLE;
Andre Guedes525e2962013-04-04 20:21:01 -03002013 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002014
Johan Hedberg42c6b122013-03-05 20:37:49 +02002015 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002016}
2017
/* Synchronously program and start an LE scan, then schedule its
 * automatic disable after @timeout.
 *
 * Returns -EINPROGRESS if a scan is already running, a negative error
 * from the synchronous requests, or 0 on success.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Set scan parameters first, then enable scanning */
	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arm the automatic scan-disable timer */
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   timeout);

	return 0;
}
2051
Andre Guedes7dbfac12012-03-15 16:52:07 -03002052int hci_cancel_le_scan(struct hci_dev *hdev)
2053{
2054 BT_DBG("%s", hdev->name);
2055
2056 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2057 return -EALREADY;
2058
2059 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2060 struct hci_cp_le_set_scan_enable cp;
2061
2062 /* Send HCI command to disable LE Scan */
2063 memset(&cp, 0, sizeof(cp));
2064 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2065 }
2066
2067 return 0;
2068}
2069
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002070static void le_scan_disable_work(struct work_struct *work)
2071{
2072 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002073 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002074 struct hci_cp_le_set_scan_enable cp;
2075
2076 BT_DBG("%s", hdev->name);
2077
2078 memset(&cp, 0, sizeof(cp));
2079
2080 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2081}
2082
Andre Guedes28b75a82012-02-03 17:48:00 -03002083static void le_scan_work(struct work_struct *work)
2084{
2085 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2086 struct le_scan_params *param = &hdev->le_scan_params;
2087
2088 BT_DBG("%s", hdev->name);
2089
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002090 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2091 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03002092}
2093
/* Stage LE scan parameters and kick the scan worker on the system-wide
 * long workqueue.
 *
 * Returns -ENOTSUPP while in LE peripheral role, -EINPROGRESS if the
 * scan work is already queued or running, otherwise 0.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
2116
/* Alloc HCI device */
/* Allocate and initialize a new hci_dev with default settings, lists,
 * work items, queues and timers. Returns NULL on allocation failure.
 * The caller registers it with hci_register_dev() and releases it with
 * hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default controller parameters */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Command response timeout watchdog */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2171
/* Free HCI device */
/* Drop the final device reference; the memory is freed via the
 * device's release callback, not directly here.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2179
/* Register HCI device */
/* Make a previously allocated hci_dev known to the stack: assign an
 * index, create workqueues and sysfs entries, hook up rfkill, and queue
 * the initial power-on. Returns the new device id (>= 0) on success or
 * a negative errno; on failure all partial state is unwound.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2267
/* Unregister HCI device */
/* Tear down a registered hci_dev: close it, detach it from the global
 * list, notify mgmt, release rfkill/sysfs/workqueues, clear all stored
 * keys and lists, and drop the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the id; hdev may be gone before the ida removal below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2326
/* Suspend HCI device.
 *
 * Only raises the HCI_DEV_SUSPEND notification via hci_notify(); no
 * controller or queue state is modified here. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2334
/* Resume HCI device.
 *
 * Counterpart of hci_suspend_dev(): only raises the HCI_DEV_RESUME
 * notification via hci_notify(). Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2342
Marcel Holtmann76bca882009-11-18 00:40:39 +01002343/* Receive frame from HCI drivers */
2344int hci_recv_frame(struct sk_buff *skb)
2345{
2346 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2347 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002348 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002349 kfree_skb(skb);
2350 return -ENXIO;
2351 }
2352
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002353 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002354 bt_cb(skb)->incoming = 1;
2355
2356 /* Time stamp */
2357 __net_timestamp(skb);
2358
Marcel Holtmann76bca882009-11-18 00:40:39 +01002359 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002360 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002361
Marcel Holtmann76bca882009-11-18 00:40:39 +01002362 return 0;
2363}
2364EXPORT_SYMBOL(hci_recv_frame);
2365
/* Incrementally reassemble one HCI packet from driver-supplied bytes.
 *
 * @hdev:  device owning the per-slot reassembly buffers
 * @type:  HCI packet type (HCI_ACLDATA_PKT..HCI_EVENT_PKT)
 * @data:  raw bytes from the driver
 * @count: number of bytes available at @data
 * @index: which hdev->reassembly[] slot to use
 *
 * Returns the number of unconsumed bytes (>= 0), -EILSEQ for an
 * invalid type/index, or -ENOMEM on allocation failure or when the
 * advertised payload would not fit the preallocated buffer.
 *
 * State lives in bt_skb_cb of the slot's skb: scb->expect counts the
 * bytes still needed (first the header, then the payload length read
 * from the header). A completed packet is handed to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* New packet: size the buffer for the worst case of this
		 * packet type and start by expecting just the header.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and sanity-check it against the tailroom.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame: hand it off (hci_recv_frame takes
			 * ownership) and clear the slot for the next packet.
			 */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2474
Marcel Holtmannef222012007-07-11 06:42:04 +02002475int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2476{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302477 int rem = 0;
2478
Marcel Holtmannef222012007-07-11 06:42:04 +02002479 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2480 return -EILSEQ;
2481
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002482 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002483 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302484 if (rem < 0)
2485 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002486
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302487 data += (count - rem);
2488 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002489 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002490
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302491 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002492}
2493EXPORT_SYMBOL(hci_recv_fragment);
2494
#define STREAM_REASSEMBLY 0

/* Reassemble packets from a raw byte stream (e.g. a UART transport).
 *
 * Unlike hci_recv_fragment(), the packet type is not known up front:
 * when no reassembly is in progress, the first stream byte is consumed
 * as the packet type indicator; otherwise the type is taken from the
 * in-progress skb. All stream traffic shares reassembly slot 0.
 *
 * Returns the number of unconsumed bytes or a negative error from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: first byte is the type */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past the consumed bytes */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2529
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530/* ---- Interface to upper protocols ---- */
2531
/* Register an upper-protocol callback structure on the global
 * hci_cb_list, guarded by hci_cb_list_lock. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2543
/* Remove a previously registered callback structure from the global
 * hci_cb_list, guarded by hci_cb_list_lock. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2555
2556static int hci_send_frame(struct sk_buff *skb)
2557{
2558 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2559
2560 if (!hdev) {
2561 kfree_skb(skb);
2562 return -ENODEV;
2563 }
2564
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002565 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002567 /* Time stamp */
2568 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002570 /* Send copy to monitor */
2571 hci_send_to_monitor(hdev, skb);
2572
2573 if (atomic_read(&hdev->promisc)) {
2574 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002575 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 }
2577
2578 /* Get rid of skb owner, prior to sending to the driver. */
2579 skb_orphan(skb);
2580
2581 return hdev->send(skb);
2582}
2583
/* Initialize an asynchronous HCI request: empty command queue, bound
 * to @hdev, with no build error recorded yet.
 */
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}
2590
/* Submit a built request: splice all queued commands atomically onto
 * the device command queue and kick the command work. @complete is
 * attached to the last command of the request only.
 *
 * Returns 0 on success, the recorded build error, or -ENODATA for an
 * empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Only the final command carries the completion callback */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Splice under the cmd_q lock so the request is enqueued as one
	 * contiguous run of commands.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2622
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002623static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002624 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625{
2626 int len = HCI_COMMAND_HDR_SIZE + plen;
2627 struct hci_command_hdr *hdr;
2628 struct sk_buff *skb;
2629
Linus Torvalds1da177e2005-04-16 15:20:36 -07002630 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002631 if (!skb)
2632 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002633
2634 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002635 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002636 hdr->plen = plen;
2637
2638 if (plen)
2639 memcpy(skb_put(skb, plen), param, plen);
2640
2641 BT_DBG("skb len %d", skb->len);
2642
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002643 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002644 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002645
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002646 return skb;
2647}
2648
/* Send HCI command.
 *
 * Builds a command skb, marks it as the start of a stand-alone
 * single-command request, and queues it on the device command queue.
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
/* Queue a command to an asynchronous HCI request.
 *
 * @event selects the HCI event that completes this command (0 means
 * the default command-complete/status handling). On allocation
 * failure the error is recorded in req->err for hci_req_run() to
 * report.
 */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request start */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}
2704
/* Queue a command to an asynchronous HCI request with no special
 * completion event (event 0 = default handling); thin wrapper around
 * hci_req_add_ev().
 */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
2710
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002712void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713{
2714 struct hci_command_hdr *hdr;
2715
2716 if (!hdev->sent_cmd)
2717 return NULL;
2718
2719 hdr = (void *) hdev->sent_cmd->data;
2720
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002721 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002722 return NULL;
2723
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002724 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725
2726 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2727}
2728
/* Send ACL data */
/* Prepend an ACL header (packed handle/flags + little-endian payload
 * length) to @skb and point the transport header at it.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	/* Payload length, captured before the header is pushed */
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2741
/* Add ACL headers and queue an outgoing ACL skb (plus any fragments
 * in its frag_list) onto @queue.
 *
 * BR/EDR controllers use the connection handle in the header, AMP
 * controllers the channel handle. The first fragment keeps the
 * caller's flags; continuation fragments are rewritten to ACL_CONT.
 * All fragments are queued under the queue lock so they stay
 * contiguous.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are
	 * queued individually below.
	 */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2800
/* Queue outgoing ACL data on the channel's data queue and kick the
 * TX work to schedule transmission.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813
/* Send SCO data */
/* Prepend a SCO header (little-endian handle, payload length), queue
 * the skb on the connection's data queue and kick the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the header on the stack, then copy it into the room
	 * made by skb_push().
	 */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835
2836/* ---- HCI TX task (outgoing data) ---- */
2837
2838/* HCI Connection scheduler */
/* Pick the connection of @type with pending data that has the fewest
 * unacked packets (fair scheduling), and compute its TX quote from
 * the controller's free buffer count for that link type.
 *
 * Returns the chosen connection or NULL; *quote is set to the number
 * of packets it may send (at least 1 when a connection is returned,
 * 0 otherwise).
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest in-flight packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Share the buffers among eligible connections */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2898
/* TX timeout handling: disconnect every connection of @type that
 * still has unacked packets, on the assumption that the link has
 * stalled.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2919
/* Channel-level scheduler: among all channels on connections of
 * @type, pick one whose head packet has the highest priority; ties
 * are broken in favor of the connection with the fewest in-flight
 * packets. *quote is set from the controller's free buffer count for
 * the link type (at least 1 when a channel is returned).
 *
 * Returns the chosen channel or NULL when nothing is pending.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority restarts the fairness scan */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, prefer the connection
			 * with the fewest in-flight packets.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffers for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
3001
/* Anti-starvation pass: for every connection of @type, reset the
 * per-channel sent counter of channels that transmitted in the last
 * round, and promote the head packet of channels that did not get to
 * send to HCI_PRIO_MAX - 1 so they win the next scheduling round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel sent last round: clear counter, skip */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
3051
/* Number of controller data blocks consumed by @skb's ACL payload
 * (header excluded), rounded up to whole blocks.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
3057
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003058static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003059{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003060 if (!test_bit(HCI_RAW, &hdev->flags)) {
3061 /* ACL tx timeout must be longer than maximum
3062 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003063 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003064 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003065 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003066 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003067}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003068
/* Packet-based ACL scheduler: drain channels picked by
 * hci_chan_sent() while controller ACL buffers remain, sending up to
 * the per-channel quote and stopping early if a lower-priority packet
 * reaches the head of the queue. Runs a priority recalculation pass
 * if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed per packet */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
3106
/* Block-based ACL scheduler (data block flow control): like
 * hci_sched_acl_pkt() but accounts in controller data blocks instead
 * of packets. AMP controllers schedule AMP_LINK, others ACL_LINK.
 * A packet larger than the remaining block budget ends the round.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Account quote and budget in blocks, not packets */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3160
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003161static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003162{
3163 BT_DBG("%s", hdev->name);
3164
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003165 /* No ACL link over BR/EDR controller */
3166 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3167 return;
3168
3169 /* No AMP link over AMP controller */
3170 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003171 return;
3172
3173 switch (hdev->flow_ctl_mode) {
3174 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3175 hci_sched_acl_pkt(hdev);
3176 break;
3177
3178 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3179 hci_sched_acl_blk(hdev);
3180 break;
3181 }
3182}
3183
Linus Torvalds1da177e2005-04-16 15:20:36 -07003184/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003185static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003186{
3187 struct hci_conn *conn;
3188 struct sk_buff *skb;
3189 int quote;
3190
3191 BT_DBG("%s", hdev->name);
3192
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003193 if (!hci_conn_num(hdev, SCO_LINK))
3194 return;
3195
Linus Torvalds1da177e2005-04-16 15:20:36 -07003196 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3197 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3198 BT_DBG("skb %p len %d", skb, skb->len);
3199 hci_send_frame(skb);
3200
3201 conn->sent++;
3202 if (conn->sent == ~0)
3203 conn->sent = 0;
3204 }
3205 }
3206}
3207
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003208static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003209{
3210 struct hci_conn *conn;
3211 struct sk_buff *skb;
3212 int quote;
3213
3214 BT_DBG("%s", hdev->name);
3215
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003216 if (!hci_conn_num(hdev, ESCO_LINK))
3217 return;
3218
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003219 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3220 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003221 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3222 BT_DBG("skb %p len %d", skb, skb->len);
3223 hci_send_frame(skb);
3224
3225 conn->sent++;
3226 if (conn->sent == ~0)
3227 conn->sent = 0;
3228 }
3229 }
3230}
3231
/* Schedule transmission of queued LE data frames. */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Controllers without a dedicated LE buffer pool (le_pkts == 0)
	 * borrow credits from the ACL count instead.
	 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* snapshot to detect progress below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool they
	 * were taken from.
	 */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3282
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003283static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003284{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003285 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286 struct sk_buff *skb;
3287
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003288 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003289 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003290
3291 /* Schedule queues and send stuff to HCI driver */
3292
3293 hci_sched_acl(hdev);
3294
3295 hci_sched_sco(hdev);
3296
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003297 hci_sched_esco(hdev);
3298
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003299 hci_sched_le(hdev);
3300
Linus Torvalds1da177e2005-04-16 15:20:36 -07003301 /* Send next queued raw (unknown type) packet */
3302 while ((skb = skb_dequeue(&hdev->raw_q)))
3303 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304}
3305
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003306/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003307
3308/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003309static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003310{
3311 struct hci_acl_hdr *hdr = (void *) skb->data;
3312 struct hci_conn *conn;
3313 __u16 handle, flags;
3314
3315 skb_pull(skb, HCI_ACL_HDR_SIZE);
3316
3317 handle = __le16_to_cpu(hdr->handle);
3318 flags = hci_flags(handle);
3319 handle = hci_handle(handle);
3320
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003321 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003322 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003323
3324 hdev->stat.acl_rx++;
3325
3326 hci_dev_lock(hdev);
3327 conn = hci_conn_hash_lookup_handle(hdev, handle);
3328 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003329
Linus Torvalds1da177e2005-04-16 15:20:36 -07003330 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003331 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003332
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003334 l2cap_recv_acldata(conn, skb, flags);
3335 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003337 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003338 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339 }
3340
3341 kfree_skb(skb);
3342}
3343
3344/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003345static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346{
3347 struct hci_sco_hdr *hdr = (void *) skb->data;
3348 struct hci_conn *conn;
3349 __u16 handle;
3350
3351 skb_pull(skb, HCI_SCO_HDR_SIZE);
3352
3353 handle = __le16_to_cpu(hdr->handle);
3354
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003355 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003356
3357 hdev->stat.sco_rx++;
3358
3359 hci_dev_lock(hdev);
3360 conn = hci_conn_hash_lookup_handle(hdev, handle);
3361 hci_dev_unlock(hdev);
3362
3363 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003364 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003365 sco_recv_scodata(conn, skb);
3366 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003367 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003368 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003369 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003370 }
3371
3372 kfree_skb(skb);
3373}
3374
Johan Hedberg9238f362013-03-05 20:37:48 +02003375static bool hci_req_is_complete(struct hci_dev *hdev)
3376{
3377 struct sk_buff *skb;
3378
3379 skb = skb_peek(&hdev->cmd_q);
3380 if (!skb)
3381 return true;
3382
3383 return bt_cb(skb)->req.start;
3384}
3385
Johan Hedberg42c6b122013-03-05 20:37:49 +02003386static void hci_resend_last(struct hci_dev *hdev)
3387{
3388 struct hci_command_hdr *sent;
3389 struct sk_buff *skb;
3390 u16 opcode;
3391
3392 if (!hdev->sent_cmd)
3393 return;
3394
3395 sent = (void *) hdev->sent_cmd->data;
3396 opcode = __le16_to_cpu(sent->opcode);
3397 if (opcode == HCI_OP_RESET)
3398 return;
3399
3400 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3401 if (!skb)
3402 return;
3403
3404 skb_queue_head(&hdev->cmd_q, skb);
3405 queue_work(hdev->workqueue, &hdev->cmd_work);
3406}
3407
/* Called when a command completes with the given status; decides whether
 * the enclosing request is finished and, if so, runs its completion
 * callback and discards the request's remaining queued commands.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request.  The
	 * queue lock must be taken irq-safe because the queue is also
	 * touched from interrupt context.
	 */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Start of the next request: put it back and stop */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3465
/* RX work handler: drains the receive queue and dispatches each packet
 * to the monitor, raw sockets and the matching protocol handler.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode userspace handles everything itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events are still needed to drive the init
			 * sequence.
			 */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame by packet type; the called handler takes
		 * ownership of the skb.
		 */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
3520
/* Command work handler: sends the next queued HCI command when the
 * controller has a free command credit (cmd_cnt).
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command before storing the
		 * new one.
		 */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so the command can be matched against its
		 * completion event (and resent if needed).
		 */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset no timeout is armed; otherwise
			 * (re)start the command timeout timer.
			 */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: re-queue the command and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003552
3553int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3554{
3555 /* General inquiry access code (GIAC) */
3556 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3557 struct hci_cp_inquiry cp;
3558
3559 BT_DBG("%s", hdev->name);
3560
3561 if (test_bit(HCI_INQUIRY, &hdev->flags))
3562 return -EINPROGRESS;
3563
Johan Hedberg46632622012-01-02 16:06:08 +02003564 inquiry_cache_flush(hdev);
3565
Andre Guedes2519a1f2011-11-07 11:45:24 -03003566 memset(&cp, 0, sizeof(cp));
3567 memcpy(&cp.lap, lap, sizeof(cp.lap));
3568 cp.length = length;
3569
3570 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3571}
Andre Guedes023d50492011-11-04 14:16:52 -03003572
3573int hci_cancel_inquiry(struct hci_dev *hdev)
3574{
3575 BT_DBG("%s", hdev->name);
3576
3577 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03003578 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03003579
3580 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3581}
Andre Guedes31f79562012-04-24 21:02:53 -03003582
3583u8 bdaddr_to_le(u8 bdaddr_type)
3584{
3585 switch (bdaddr_type) {
3586 case BDADDR_LE_PUBLIC:
3587 return ADDR_LE_DEV_PUBLIC;
3588
3589 default:
3590 /* Fallback to LE Random address type */
3591 return ADDR_LE_DEV_RANDOM;
3592 }
3593}