/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

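/* Take the last received event (stashed in hdev->recv_evt by the event
 * processing path) and verify that it is the Command Complete event -- or
 * the specific event requested by the caller -- for the given opcode.
 * Returns the skb with the event header pulled off, or an ERR_PTR if no
 * matching event was received.
 */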
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

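/* Send a single HCI command and sleep (interruptibly, up to the given
 * timeout) until the matching completion event arrives, then hand back
 * the event skb via hci_get_cmd_complete().
 */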
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

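/* Stage one of controller initialization: an optional reset followed by
 * the transport-specific basic commands (BR/EDR or AMP).
 */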
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

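/* Build the event mask from the controller's LMP feature bits so that
 * only events the hardware can actually generate are unmasked.
 */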
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

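/* Run the staged initialization sequence. Each stage is a separate
 * synchronous request because later stages depend on values (features,
 * supported commands) returned by earlier ones.
 */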
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

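/* A device counts as actively discovering while it is either inquiring
 * or scanning (FINDING) or resolving remote names (RESOLVING).
 */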
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

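/* Add or refresh an inquiry cache entry for a discovered device. Returns
 * true when the entry's remote name is already known, false when it is
 * still unknown (so callers can decide whether name resolution is needed)
 * or when allocating a new entry fails.
 */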
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

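/* HCIINQUIRY ioctl handler: optionally (re)start an inquiry, wait for it
 * to finish, then copy the cached results back to user space.
 */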
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

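/* Assemble the LE advertising data payload (flags, TX power and the local
 * name) into ptr and return the number of bytes written.
 */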
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

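/* Power on a controller: driver open, optional vendor setup callback and
 * the staged HCI initialization, with full cleanup on any failure.
 */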
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

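/* Power off a controller: flush pending work and queues, optionally reset
 * the hardware, and notify the management interface.
 */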
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

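/* Handler for the legacy HCISET* ioctls that tweak individual controller
 * settings (auth, encryption, scan mode, link policy, packet types and
 * MTUs).
 */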
1378int hci_dev_cmd(unsigned int cmd, void __user *arg)
1379{
1380 struct hci_dev *hdev;
1381 struct hci_dev_req dr;
1382 int err = 0;
1383
1384 if (copy_from_user(&dr, arg, sizeof(dr)))
1385 return -EFAULT;
1386
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001387 hdev = hci_dev_get(dr.dev_id);
1388 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001389 return -ENODEV;
1390
1391 switch (cmd) {
1392 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001393 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1394 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 break;
1396
1397 case HCISETENCRYPT:
1398 if (!lmp_encrypt_capable(hdev)) {
1399 err = -EOPNOTSUPP;
1400 break;
1401 }
1402
1403 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1404 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001405 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1406 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001407 if (err)
1408 break;
1409 }
1410
Johan Hedberg01178cd2013-03-05 20:37:41 +02001411 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1412 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413 break;
1414
1415 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001416 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1417 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001418 break;
1419
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001420 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001421 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1422 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001423 break;
1424
1425 case HCISETLINKMODE:
1426 hdev->link_mode = ((__u16) dr.dev_opt) &
1427 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1428 break;
1429
Linus Torvalds1da177e2005-04-16 15:20:36 -07001430 case HCISETPTYPE:
1431 hdev->pkt_type = (__u16) dr.dev_opt;
1432 break;
1433
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001435 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1436 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437 break;
1438
1439 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001440 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1441 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 break;
1443
1444 default:
1445 err = -EINVAL;
1446 break;
1447 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001448
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449 hci_dev_put(hdev);
1450 return err;
1451}
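
/* Illustrative only: for HCISETACLMTU and HCISETSCOMTU the 32-bit
 * dev_opt carries two 16-bit values, read back above with host-endian
 * pointer arithmetic. On a little-endian machine a caller packs the
 * MTU into the upper half and the packet count into the lower half:
 *
 *	struct hci_dev_req dr = { .dev_id = 0 };
 *
 *	dr.dev_opt = (1021 << 16) | 8;	(acl_mtu 1021, acl_pkts 8)
 *	ioctl(sk, HCISETACLMTU, &dr);
 */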

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
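
/* Illustrative only: the matching HCIGETDEVLIST call from userspace
 * sizes the request for the trailing array, presets dev_num to its
 * capacity and reads back how many entries were actually filled in:
 *
 *	struct hci_dev_list_req *dl;
 *
 *	dl = calloc(1, sizeof(*dl) + 16 * sizeof(struct hci_dev_req));
 *	dl->dev_num = 16;
 *	ioctl(sk, HCIGETDEVLIST, dl);
 *	(dl->dev_num now holds the number of registered controllers)
 */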

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_open(hdev->id);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
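
/* Informational note on the magic numbers above: in the authentication
 * requirements encoding of the Bluetooth Core Specification, 0x00/0x01
 * mean "no bonding" (without/with MITM protection), 0x02/0x03 mean
 * "dedicated bonding" and 0x04/0x05 "general bonding". Hence the
 * "> 0x01" test for any kind of bonding and the "0x02 || 0x03" test
 * for dedicated bonding.
 */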

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
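
/* Note (behaviour pieced together from this file): the timer is set up
 * in hci_alloc_dev() and re-armed by the command work each time a
 * command is handed to the driver, roughly
 *
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 *
 * so it only fires when the controller fails to answer the outstanding
 * command. Resetting cmd_cnt to 1 lets the next queued command out.
 */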

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}
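
/* Note (scheduling happens outside this file, sketch only): the work
 * above is armed as delayed work when a time-limited LE scan starts,
 * along the lines of
 *
 *	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
 *			   msecs_to_jiffies(timeout));
 *
 * Its completion handler then either stops discovery (DISCOV_TYPE_LE)
 * or continues with a BR/EDR inquiry (DISCOV_TYPE_INTERLEAVED).
 */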

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
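
/* Illustrative only: a transport driver pairs these helpers with
 * hci_register_dev()/hci_unregister_dev() below. A minimal probe
 * sketch, where my_open/my_close/my_flush/my_send are hypothetical
 * driver callbacks:
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->flush = my_flush;
 *	hdev->send  = my_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */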

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
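
/* Illustrative only: a driver that already has complete frames hands
 * them in like this (the same steps hci_reassembly() below performs
 * once it has a full packet):
 *
 *	skb->dev = (void *) hdev;
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	hci_recv_frame(skb);
 *
 * Drivers that only see a byte stream use hci_recv_fragment() or
 * hci_recv_stream_fragment() below instead.
 */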

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
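
/* Worked example (informational): an HCI event 0x0e with plen 4 sent
 * as the chunks "0e 04 01" and "03 0c 00". The first call consumes the
 * two header bytes, learns scb->expect = h->plen = 4 and buffers one
 * payload byte; the second call supplies the remaining three bytes,
 * expect reaches 0 and the completed skb goes to hci_recv_frame().
 * The return value is the number of unconsumed input bytes after a
 * complete frame, so callers loop until it reaches 0.
 */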
2432
Marcel Holtmannef222012007-07-11 06:42:04 +02002433int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2434{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302435 int rem = 0;
2436
Marcel Holtmannef222012007-07-11 06:42:04 +02002437 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2438 return -EILSEQ;
2439
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002440 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002441 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302442 if (rem < 0)
2443 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002444
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302445 data += (count - rem);
2446 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002447 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002448
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302449 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002450}
2451EXPORT_SYMBOL(hci_recv_fragment);
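
/* Illustrative only: a driver whose transport already identifies the
 * packet type can feed arbitrarily sized chunks, e.g.
 *
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *
 * hci_recv_stream_fragment() below instead recovers the type from the
 * H:4 indicator byte at the start of each frame.
 */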

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
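
/* Illustrative only: the canonical request pattern, as used by
 * le_scan_disable_work() above -- build, queue one or more commands,
 * then run with a completion callback (my_complete is a placeholder)
 * that receives the HCI status of the last command:
 *
 *	struct hci_request req;
 *	struct hci_cp_le_set_scan_enable cp;
 *
 *	hci_req_init(&req, hdev);
 *	memset(&cp, 0, sizeof(cp));
 *	cp.enable = LE_SCAN_DISABLE;
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, my_complete);
 */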

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
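
/* Illustrative only: hci_discov_off() above is a typical caller -- a
 * single fire-and-forget command:
 *
 *	u8 scan = SCAN_PAGE;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *
 * Callers that need the result use the request machinery instead.
 */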

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point
	 * in queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
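
/* Note (informational): hci_handle_pack() folds the 12-bit connection
 * handle and the 4-bit packet boundary/broadcast flags into one 16-bit
 * field, roughly (handle & 0x0fff) | (flags << 12), matching the ACL
 * data packet layout in the Bluetooth Core Specification.
 */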
2699
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002700static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002701 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002703 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 struct hci_dev *hdev = conn->hdev;
2705 struct sk_buff *list;
2706
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002707 skb->len = skb_headlen(skb);
2708 skb->data_len = 0;
2709
2710 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002711
2712 switch (hdev->dev_type) {
2713 case HCI_BREDR:
2714 hci_add_acl_hdr(skb, conn->handle, flags);
2715 break;
2716 case HCI_AMP:
2717 hci_add_acl_hdr(skb, chan->handle, flags);
2718 break;
2719 default:
2720 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2721 return;
2722 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002723
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002724 list = skb_shinfo(skb)->frag_list;
2725 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002726 /* Non fragmented */
2727 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2728
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002729 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 } else {
2731 /* Fragmented */
2732 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2733
2734 skb_shinfo(skb)->frag_list = NULL;
2735
2736 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002737 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002739 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002740
2741 flags &= ~ACL_START;
2742 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 do {
2744 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002745
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002747 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002748 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
2750 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2751
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002752 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 } while (list);
2754
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002755 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002757}
2758
2759void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2760{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002761 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002762
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002763 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002764
2765 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002766
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002767 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002769 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
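
/* Minimal usage sketch: SCO has no fragmentation or channel layer, so a
 * caller such as the SCO socket code hands a complete frame straight to
 * the connection; skb->len is assumed to fit the negotiated SCO MTU
 * (hdev->sco_mtu):
 *
 *	hci_send_sco(conn, skb);
 */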

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
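
/* Worked example of the quota computation above (numbers invented):
 * three SCO connections have data queued (num == 3) and the controller
 * has 10 free SCO buffers (cnt == hdev->sco_cnt == 10), so
 * q = 10 / 3 = 3 and the least-busy connection may send up to three
 * frames this round. With cnt == 2 and num == 3, q would be 0, and the
 * "*quote = q ? q : 1" fallback still grants one frame, so a connection
 * with queued data is never starved outright.
 */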

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
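
/* Illustrative example of the selection rule above (values invented):
 * channels A, B and C on three connections head their queues with
 * priority 5 / conn->sent 10, priority 7 / conn->sent 2 and priority 7 /
 * conn->sent 0 respectively. Priority wins first, so A drops out as soon
 * as priority 7 is seen; among B and C the lower conn->sent wins, so C
 * is returned. The per-round quota is then derived from the link-type
 * buffer count exactly as in hci_low_sent().
 */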

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
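
/* Worked example, assuming HCI_ACL_HDR_SIZE == 4 and a controller that
 * reported hdev->block_len == 339 (an invented value): a 1021-byte skb
 * carries 1017 bytes after the ACL header, and DIV_ROUND_UP(1017, 339)
 * == 3, so transmitting it consumes three data blocks from
 * hdev->block_cnt.
 */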

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3118
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003119static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003120{
3121 BT_DBG("%s", hdev->name);
3122
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003123 /* No ACL link over BR/EDR controller */
3124 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3125 return;
3126
3127 /* No AMP link over AMP controller */
3128 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003129 return;
3130
3131 switch (hdev->flow_ctl_mode) {
3132 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3133 hci_sched_acl_pkt(hdev);
3134 break;
3135
3136 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3137 hci_sched_acl_blk(hdev);
3138 break;
3139 }
3140}
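
/* The dispatch above mirrors the two HCI flow control models: classic
 * packet-based counting (one credit per ACL packet, the usual mode for
 * BR/EDR controllers) and block-based counting used with AMP
 * controllers, where each packet consumes __get_blocks(hdev, skb)
 * credits. A rough sketch of the invariant ("credits" is a local name
 * for illustration only):
 *
 *	if (hdev->flow_ctl_mode == HCI_FLOW_CTL_MODE_PACKET_BASED)
 *		credits = hdev->acl_cnt;	// counted in packets
 *	else
 *		credits = hdev->block_cnt;	// counted in blocks
 */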

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
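
/* Worked example of the handle/flags split above, assuming the usual
 * 12/4 bit layout behind hci_handle()/hci_flags(): the 16-bit field
 * from the wire packs a 12-bit connection handle plus 4 bits of packet
 * boundary/broadcast flags. For an invented raw value of 0x1042:
 *
 *	handle = hci_handle(0x1042);	// 0x1042 & 0x0fff == 0x042
 *	flags  = hci_flags(0x1042);	// 0x1042 >> 12   == 0x1 (ACL_CONT)
 *
 * i.e. a continuation fragment for connection handle 0x042.
 */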

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
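
/* Example of the convention this relies on (opcodes invented): the
 * hci_request helpers mark only the first command of each request with
 * bt_cb(skb)->req.start = true. With cmd_q holding
 *
 *	[A start][B][C][D start][E]
 *
 * the request A..C is complete exactly when the queue head is D (or the
 * queue is empty), which is what hci_req_is_complete() checks.
 */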

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
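
/* Scenario sketch for the logic above (commands invented, and assuming
 * the hci_request helpers attach the completion callback to a request's
 * final command): a request [A][B][C] stores its callback on C. If B
 * fails with status 0x12 while C is still queued, hdev->sent_cmd (B)
 * carries no callback, so the drain loop removes C from cmd_q --
 * stopping at the next skb marked req.start -- picks the callback up
 * from C, and invokes req_complete(hdev, 0x12) exactly once. The
 * untransmitted tail of a failed request is thus never sent.
 */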

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}
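
/* Minimal usage sketch: callers such as the mgmt interface deal in
 * BDADDR_LE_PUBLIC/BDADDR_LE_RANDOM address types, while the HCI core
 * uses the ADDR_LE_DEV_* values internally; this helper bridges the two.
 *
 *	u8 addr_type = bdaddr_to_le(BDADDR_LE_PUBLIC);
 *	// addr_type == ADDR_LE_DEV_PUBLIC
 */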