/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
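
/* A pending synchronous request moves from HCI_REQ_PEND to either
 * HCI_REQ_DONE (on completion) or HCI_REQ_CANCELED (on error or
 * teardown); both transitions wake the waiter sleeping on req_wait_q
 * in __hci_req_sync() or __hci_cmd_sync_ev().
 */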
81
Fengguang Wu77a63e02013-04-20 16:24:31 +030082static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
83 u8 event)
Johan Hedberg75e84b72013-04-02 13:35:04 +030084{
85 struct hci_ev_cmd_complete *ev;
86 struct hci_event_hdr *hdr;
87 struct sk_buff *skb;
88
89 hci_dev_lock(hdev);
90
91 skb = hdev->recv_evt;
92 hdev->recv_evt = NULL;
93
94 hci_dev_unlock(hdev);
95
96 if (!skb)
97 return ERR_PTR(-ENODATA);
98
99 if (skb->len < sizeof(*hdr)) {
100 BT_ERR("Too short HCI event");
101 goto failed;
102 }
103
104 hdr = (void *) skb->data;
105 skb_pull(skb, HCI_EVENT_HDR_SIZE);
106
Johan Hedberg7b1abbb2013-04-03 21:54:47 +0300107 if (event) {
108 if (hdr->evt != event)
109 goto failed;
110 return skb;
111 }
112
Johan Hedberg75e84b72013-04-02 13:35:04 +0300113 if (hdr->evt != HCI_EV_CMD_COMPLETE) {
114 BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
115 goto failed;
116 }
117
118 if (skb->len < sizeof(*ev)) {
119 BT_ERR("Too short cmd_complete event");
120 goto failed;
121 }
122
123 ev = (void *) skb->data;
124 skb_pull(skb, sizeof(*ev));
125
126 if (opcode == __le16_to_cpu(ev->opcode))
127 return skb;
128
129 BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
130 __le16_to_cpu(ev->opcode));
131
132failed:
133 kfree_skb(skb);
134 return ERR_PTR(-ENODATA);
135}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
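
/* Illustrative usage sketch (not part of the upstream file): a caller
 * serialized against other requests (e.g. under hci_req_lock()) could
 * issue a synchronous Read BD Address and parse the Command Complete
 * parameters along these lines:
 *
 *	struct hci_rp_read_bd_addr *rp;
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	rp = (struct hci_rp_read_bd_addr *) skb->data;
 *	bacpy(&hdev->bdaddr, &rp->bdaddr);
 *	kfree_skb(skb);
 */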

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
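
/* Note the split: __hci_req_sync() assumes the caller already holds
 * hci_req_lock(), while hci_req_sync() is the locking wrapper and also
 * refuses to run unless the device is up.
 */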

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
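
/* BR/EDR controllers account outgoing traffic per packet (bredr_init
 * selects HCI_FLOW_CTL_MODE_PACKET_BASED), whereas AMP controllers
 * account per data block; the mode chosen here matches the buffer size
 * command issued for the controller type (Read Buffer Size vs Read Data
 * Block Size).
 */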

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
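	/* 0x7d00 above is 32000 baseband slots x 0.625 ms = 20 seconds */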

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}
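
/* The values returned above correspond to the Write Inquiry Mode
 * parameter: 0x00 standard results, 0x01 results with RSSI, 0x02
 * extended results. The manufacturer/revision checks whitelist known
 * controllers that handle RSSI results without advertising the
 * corresponding LMP feature bit.
 */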

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}
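
/* Staged bring-up summary: hci_init1_req resets the controller and reads
 * its basic capabilities, hci_init2_req programs event masks and SSP/EIR
 * state based on those capabilities, and hci_init3_req applies link
 * policy, LE support and extended feature reads that depend on init2's
 * results.
 */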

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
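
/* Discovery state machine: STOPPED -> STARTING -> FINDING, optionally
 * RESOLVING, then STOPPING -> STOPPED. Userspace only sees the
 * transitions into FINDING and back to STOPPED via mgmt_discovering().
 */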

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
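
/* The resolve list is kept ordered by ascending abs(rssi); since RSSI is
 * a negative dBm value this resolves the strongest devices first, and
 * entries whose name request is already NAME_PENDING are not displaced.
 */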

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
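
/* The return value tells the caller whether the remote name can be
 * treated as known; false means the entry still needs a remote name
 * request (or could not be allocated) before the device is reported.
 */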

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
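
/* Each element emitted by create_ad() follows the EIR/AD format: a
 * length octet covering type plus payload, a type octet (EIR_*) and the
 * payload itself, packed back to back into at most HCI_MAX_AD_LENGTH
 * octets.
 */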

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1373
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

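/* The timer above only reports the stall and re-opens the command window
 * (cmd_cnt back to 1) so that queued commands are not blocked forever; it
 * does not reset the controller. It is armed when a command is handed to
 * the driver (in hci_cmd_work) and cancelled via del_timer_sync() in
 * hci_dev_do_close() before the last sent command is freed.
 */
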
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

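/* Taken together, the three functions above implement the tail end of an
 * LE discovery cycle: le_scan_disable_work() builds a one-command request
 * that turns LE scanning off, its completion handler either stops
 * discovery (LE-only case) or chains a BR/EDR inquiry (interleaved case),
 * and inquiry_complete() only has to reset the discovery state when that
 * inquiry could not be started.
 */
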
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

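/* A typical driver pairs the two helpers above roughly like this
 * (illustrative sketch only; my_open/my_close/my_flush/my_send are
 * hypothetical driver callbacks, and registration itself is done with
 * hci_register_dev() below):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */
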
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

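/* A driver whose transport delivers complete HCI packets feeds them in
 * roughly like this (sketch; the skb is assumed to hold one full packet
 * without the transport's packet-type indicator byte):
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	err = hci_recv_frame(skb);
 *
 * On failure the skb has already been consumed, so the caller must not
 * free it again.
 */
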
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

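/* Sketch of driver usage for the helper above, for transports that chunk
 * packets arbitrarily but still know each chunk's packet type (buf and
 * len are hypothetical receive-path variables):
 *
 *	err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("Frame reassembly failed (%d)", err);
 *
 * The type - 1 mapping gives every packet type its own reassembly slot,
 * so partially received packets of different types may be interleaved.
 */
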
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

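/* For fully stream-oriented transports (e.g. a UART running H:4, where
 * the first byte of every frame is the packet type indicator consumed by
 * the function above), a minimal sketch looks like:
 *
 *	err = hci_recv_stream_fragment(hdev, data, count);
 *	if (err < 0)
 *		BT_ERR("Stream reassembly failed (%d)", err);
 */
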
/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

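/* Minimal usage sketch for the request API above: queue one or more
 * commands with hci_req_add() (defined below) and submit them with a
 * single completion callback (my_complete is a hypothetical
 * hci_req_complete_t):
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *	err = hci_req_run(&req, my_complete);
 *
 * The callback is attached to the last queued command, so it runs once,
 * when the whole request has completed.
 */
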
static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

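/* Stand-alone command submission is fire-and-forget; hci_discov_off()
 * earlier in this file, for instance, simply does:
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *
 * The reply, once it arrives, can be matched to the request with
 * hci_sent_cmd_data() below using the same opcode.
 */
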
/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}

Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002717static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002718 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002719{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002720 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 struct hci_dev *hdev = conn->hdev;
2722 struct sk_buff *list;
2723
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002724 skb->len = skb_headlen(skb);
2725 skb->data_len = 0;
2726
2727 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002728
2729 switch (hdev->dev_type) {
2730 case HCI_BREDR:
2731 hci_add_acl_hdr(skb, conn->handle, flags);
2732 break;
2733 case HCI_AMP:
2734 hci_add_acl_hdr(skb, chan->handle, flags);
2735 break;
2736 default:
2737 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2738 return;
2739 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002740
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002741 list = skb_shinfo(skb)->frag_list;
2742 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 /* Non fragmented */
2744 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2745
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002746 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747 } else {
2748 /* Fragmented */
2749 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2750
2751 skb_shinfo(skb)->frag_list = NULL;
2752
2753 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002754 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002756 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002757
2758 flags &= ~ACL_START;
2759 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 do {
2761 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002762
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002764 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002765 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766
2767 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2768
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002769 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 } while (list);
2771
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002772 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002774}
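
/*
 * Descriptive note (added): the first fragment keeps the caller's ACL_START
 * flag, while every fragment hanging off frag_list is re-tagged ACL_CONT and
 * queued under the queue lock, so the scheduler can never interleave another
 * channel's data between fragments of one upper-layer PDU. Note that
 * continuation fragments are always stamped with conn->handle, even on the
 * HCI_AMP path above, which uses chan->handle only for the first fragment.
 */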

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
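
/*
 * Hedged usage sketch (assumption, not taken from this file): an upper
 * layer such as L2CAP builds the skb (optionally with a frag_list for PDUs
 * larger than the controller's ACL MTU) and hands it off roughly as
 *
 *	hci_send_acl(conn->hchan, skb, ACL_START);
 *
 * where conn->hchan names the hci_chan bound to the link (the variable
 * names here are illustrative); the real call sites live in l2cap_core.c.
 */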

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
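
/*
 * Descriptive note (added): struct hci_sco_hdr carries the payload length
 * in a single byte (dlen), so a SCO frame handed to this function must fit
 * in 255 bytes; in practice the SCO socket layer caps writes at the
 * controller's reported SCO MTU, which is smaller still.
 */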

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
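
/*
 * Worked example (added for illustration): with hdev->acl_cnt == 8 free
 * controller buffers and three ACL connections that all have queued data,
 * the connection with the fewest packets in flight (smallest c->sent) is
 * picked and granted a quote of 8 / 3 == 2 packets. With fewer buffers
 * than connections (say cnt == 2, num == 3) the division yields 0 and the
 * "q ? q : 1" fallback still grants a single packet, so the chosen
 * connection always makes progress.
 */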

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
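
/*
 * Descriptive note (added): unlike hci_low_sent() above, this walker is
 * priority-aware. It tracks the highest head-of-queue skb->priority seen
 * so far (cur_prio); whenever a higher priority appears, the candidate set
 * is reset (num = 0, min = ~0), so only channels tied at the top priority
 * compete, and among those the channel whose connection has the fewest
 * packets in flight wins. The quota calculation at the end mirrors
 * hci_low_sent().
 */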

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
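
/*
 * Descriptive note (added): this is the anti-starvation half of the
 * priority scheme. After a TX round, a channel that was serviced simply
 * has its per-round counter cleared, while a channel that still has queued
 * data but sent nothing gets its head skb promoted to HCI_PRIO_MAX - 1, so
 * it outranks ordinary traffic in the next hci_chan_sent() pass.
 */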

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
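
/*
 * Worked example (added for illustration; the block length is controller
 * specific): skb->len still includes the 4-byte ACL header pushed by
 * hci_add_acl_hdr(), so a 1028-byte skb on a controller reporting
 * block_len == 256 costs DIV_ROUND_UP(1024, 256) == 4 data blocks.
 */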

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
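
/*
 * Descriptive note (added): in block-based flow control the accounting
 * unit is a data block rather than a whole packet, so the quota, the
 * controller budget (hdev->block_cnt) and the per-channel counters are all
 * debited by __get_blocks(skb). If a packet needs more blocks than remain,
 * the function returns immediately; observe that the already-dequeued skb
 * is neither transmitted nor requeued at that point.
 */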

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
3182
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003183static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003184{
3185 struct hci_conn *conn;
3186 struct sk_buff *skb;
3187 int quote;
3188
3189 BT_DBG("%s", hdev->name);
3190
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003191 if (!hci_conn_num(hdev, ESCO_LINK))
3192 return;
3193
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003194 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3195 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003196 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3197 BT_DBG("skb %p len %d", skb, skb->len);
3198 hci_send_frame(skb);
3199
3200 conn->sent++;
3201 if (conn->sent == ~0)
3202 conn->sent = 0;
3203 }
3204 }
3205}
3206
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003207static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003208{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003209 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003210 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003211 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003212
3213 BT_DBG("%s", hdev->name);
3214
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003215 if (!hci_conn_num(hdev, LE_LINK))
3216 return;
3217
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003218 if (!test_bit(HCI_RAW, &hdev->flags)) {
3219 /* LE tx timeout must be longer than maximum
3220 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003221 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003222 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003223 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003224 }
3225
3226 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003227 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003228 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003229 u32 priority = (skb_peek(&chan->data_q))->priority;
3230 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003231 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003232 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003233
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003234 /* Stop if priority has changed */
3235 if (skb->priority < priority)
3236 break;
3237
3238 skb = skb_dequeue(&chan->data_q);
3239
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003240 hci_send_frame(skb);
3241 hdev->le_last_tx = jiffies;
3242
3243 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003244 chan->sent++;
3245 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003246 }
3247 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003248
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003249 if (hdev->le_pkts)
3250 hdev->le_cnt = cnt;
3251 else
3252 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003253
3254 if (cnt != tmp)
3255 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003256}
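
/*
 * Descriptive note (added): controllers without a dedicated LE buffer pool
 * report le_pkts == 0, in which case LE traffic borrows from the shared
 * ACL pool; that is why the residual count above is written back to either
 * hdev->le_cnt or hdev->acl_cnt depending on hdev->le_pkts.
 */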

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
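
/*
 * Illustrative note (added, not in the original source): hci_flags() and
 * hci_handle() undo the packing done by hci_add_acl_hdr() on the TX path,
 * e.g. a received header word of 0x202a splits into flags 0x2 (ACL_START)
 * and handle 0x002a before the connection lookup above.
 */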

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
3349
Johan Hedberg9238f362013-03-05 20:37:48 +02003350static bool hci_req_is_complete(struct hci_dev *hdev)
3351{
3352 struct sk_buff *skb;
3353
3354 skb = skb_peek(&hdev->cmd_q);
3355 if (!skb)
3356 return true;
3357
3358 return bt_cb(skb)->req.start;
3359}
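
/*
 * Descriptive note (added): commands queued as part of one HCI request
 * carry req.start only on the request's first command, so the current
 * request is complete exactly when the command queue is empty or its head
 * starts a new request.
 */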
3360
Johan Hedberg42c6b122013-03-05 20:37:49 +02003361static void hci_resend_last(struct hci_dev *hdev)
3362{
3363 struct hci_command_hdr *sent;
3364 struct sk_buff *skb;
3365 u16 opcode;
3366
3367 if (!hdev->sent_cmd)
3368 return;
3369
3370 sent = (void *) hdev->sent_cmd->data;
3371 opcode = __le16_to_cpu(sent->opcode);
3372 if (opcode == HCI_OP_RESET)
3373 return;
3374
3375 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3376 if (!skb)
3377 return;
3378
3379 skb_queue_head(&hdev->cmd_q, skb);
3380 queue_work(hdev->workqueue, &hdev->cmd_work);
3381}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
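
/*
 * Descriptive note (added): on failure (status != 0) the request is
 * aborted: the loop above flushes every remaining command of the current
 * request from cmd_q, stopping at the next req.start boundary, and the
 * request's completion callback runs exactly once with the failing status
 * (the NULL assignment on sent_cmd's callback guards against a second
 * invocation).
 */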

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
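
/*
 * Descriptive note (added): cmd_cnt gates command transmission; it is
 * decremented here for each command put on the wire and refilled when
 * Command Complete/Command Status events report that the controller can
 * accept more, which in practice keeps a single command outstanding. A
 * clone of the sent command is kept in hdev->sent_cmd for completion
 * matching, and cmd_timer fires HCI_CMD_TIMEOUT after a send unless a
 * reset is in flight.
 */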

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}