/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

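/* Consume the most recently received event (hdev->recv_evt) and verify
 * that it is what the synchronous command path is waiting for: either a
 * specific event code, or a Command Complete event carrying the
 * expected opcode. Returns the skb on a match, ERR_PTR(-ENODATA)
 * otherwise.
 */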
static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
					    u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

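/* Send a single HCI command and sleep (interruptibly, for at most
 * @timeout jiffies) until the controller answers with the requested
 * event. The matching event skb is returned so that callers can parse
 * the response parameters directly.
 */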
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

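/* Entry point for running a request against a powered-on controller:
 * fails with -ENETDOWN unless HCI_UP is set and takes hci_req_lock() so
 * that only one synchronous request is in flight at a time.
 */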
static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

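/* Stage-one init for BR/EDR capable controllers: packet-based flow
 * control plus the basic identity reads (features, version, address).
 */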
static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

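/* Stage-one init for AMP controllers, which use block-based flow
 * control and, unlike BR/EDR controllers, have no BD_ADDR to read.
 */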
static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local Supported Commands */
	hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

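/* Build the event mask for HCI_OP_SET_EVENT_MASK. Each bit enables
 * delivery of one HCI event; the defaults below are widened or trimmed
 * based on the features the controller advertises.
 */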
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	} else {
		/* Use a different default for LE-only devices */
		memset(events, 0, sizeof(events));
		events[0] |= 0x10; /* Disconnection Complete */
		events[0] |= 0x80; /* Encryption Change */
		events[1] |= 0x08; /* Read Remote Version Information Complete */
		events[1] |= 0x20; /* Command Complete */
		events[1] |= 0x40; /* Command Status */
		events[1] |= 0x80; /* Hardware Error */
		events[2] |= 0x04; /* Number of Completed Packets */
		events[3] |= 0x02; /* Data Buffer Overflow */
		events[5] |= 0x80; /* Encryption Key Refresh Complete */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);
	else
		clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
	 * local supported commands HCI command.
	 */
	if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

	/* If Connectionless Slave Broadcast master role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x01) {
		events[1] |= 0x40; /* Triggered Clock Capture */
		events[1] |= 0x80; /* Synchronization Train Complete */
		events[2] |= 0x10; /* Slave Page Response Timeout */
		events[2] |= 0x20; /* CSB Channel Map Change */
	}

	/* If Connectionless Slave Broadcast slave role is supported
	 * enable all necessary events for it.
	 */
	if (hdev->features[2][0] & 0x02) {
		events[2] |= 0x01; /* Synchronization Train Received */
		events[2] |= 0x02; /* CSB Receive */
		events[2] |= 0x04; /* CSB Timeout */
		events[2] |= 0x08; /* Truncated Page Complete */
	}

	hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	/* Some Broadcom based Bluetooth controllers do not support the
	 * Delete Stored Link Key command. They are clearly indicating its
	 * absence in the bit mask of supported commands.
	 *
	 * Check the supported commands and only if the command is marked
	 * as supported send it. If not supported assume that the controller
	 * does not have actual support for stored link keys which makes this
	 * command redundant anyway.
	 */
	if (hdev->commands[6] & 0x80) {
		struct hci_cp_delete_stored_link_key cp;

		bacpy(&cp.bdaddr, BDADDR_ANY);
		cp.delete_all = 0x01;
		hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
			    sizeof(cp), &cp);
	}

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* Set event mask page 2 if the HCI command for it is supported */
	if (hdev->commands[22] & 0x04)
		hci_set_event_mask_page_2(req);

	/* Check for Synchronization Train support */
	if (hdev->features[2][0] & 0x04)
		hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

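/* Run the staged initialization sequence. Stage one applies to every
 * controller type; stages two to four are only run for HCI_BREDR
 * devices, which here covers LE-only, BR/EDR-only and dual-mode
 * controllers alike.
 */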
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

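/* Re-insert @ie into the resolve list, keeping entries that still need
 * name resolution ordered by signal strength so that the strongest
 * (closest) devices get their names resolved first.
 */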
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

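/* Add a freshly discovered device to the inquiry cache, or refresh the
 * existing entry. Returns false when the entry's name is still unknown
 * (or allocation failed), i.e. when a remote name request would still
 * be useful.
 */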
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		hci_inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

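/* Assemble the LE advertising data payload into @ptr: an EIR_FLAGS
 * field, the TX power level when it is valid, and as much of the local
 * name as still fits within HCI_MAX_AD_LENGTH. Returns the number of
 * bytes written.
 */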
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		if (lmp_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_CTRL;
		if (lmp_host_le_br_capable(hdev))
			flags |= LE_AD_SIM_LE_BREDR_HOST;
	} else {
		flags |= LE_AD_NO_BREDR;
	}

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

static int hci_dev_do_open(struct hci_dev *hdev)
{
	int ret = 0;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		/* Check for rfkill but allow the HCI setup stage to
		 * proceed (which in itself doesn't cause any RF activity).
		 */
		if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
			ret = -ERFKILL;
			goto done;
		}

		/* Check for valid public address or a configured static
		 * random address, but let the HCI setup proceed to
		 * be able to determine if there is a public address
		 * or not.
		 *
		 * This check is only valid for BR/EDR controllers
		 * since AMP controllers do not have an address.
		 */
		if (hdev->dev_type == HCI_BREDR &&
		    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
		    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
			ret = -EADDRNOTAVAIL;
			goto done;
		}
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
		    hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* We need to ensure that no other power on/off work is pending
	 * before proceeding to call hci_dev_do_open. This is
	 * particularly important if the setup procedure has not yet
	 * completed.
	 */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	/* After this call it is guaranteed that the setup procedure
	 * has finished. This means that error conditions like RFKILL
	 * or no valid public or static random address apply.
	 */
	flush_workqueue(hdev->req_workqueue);

	err = hci_dev_do_open(hdev);

	hci_dev_put(hdev);

	return err;
}

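/* The mirror image of hci_dev_do_open(): flush pending work, drop all
 * queued packets, optionally reset the controller, and release the
 * reference taken when the device was brought up.
 */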
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1437 cancel_delayed_work(&hdev->power_off);
1438
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001440
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001441done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001442 hci_dev_put(hdev);
1443 return err;
1444}
1445
1446int hci_dev_reset(__u16 dev)
1447{
1448 struct hci_dev *hdev;
1449 int ret = 0;
1450
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001451 hdev = hci_dev_get(dev);
1452 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453 return -ENODEV;
1454
1455 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001456
Marcel Holtmann808a0492013-08-26 20:57:58 -07001457 if (!test_bit(HCI_UP, &hdev->flags)) {
1458 ret = -ENETDOWN;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001459 goto done;
Marcel Holtmann808a0492013-08-26 20:57:58 -07001460 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001462 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1463 ret = -EBUSY;
1464 goto done;
1465 }
1466
Linus Torvalds1da177e2005-04-16 15:20:36 -07001467 /* Drop queues */
1468 skb_queue_purge(&hdev->rx_q);
1469 skb_queue_purge(&hdev->cmd_q);
1470
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001471 hci_dev_lock(hdev);
Andre Guedes1f9b9a52013-04-30 15:29:27 -03001472 hci_inquiry_cache_flush(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001474 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001475
1476 if (hdev->flush)
1477 hdev->flush(hdev);
1478
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001479 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001480 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001481
1482 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001483 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001484
1485done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 hci_req_unlock(hdev);
1487 hci_dev_put(hdev);
1488 return ret;
1489}
1490
1491int hci_dev_reset_stat(__u16 dev)
1492{
1493 struct hci_dev *hdev;
1494 int ret = 0;
1495
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001496 hdev = hci_dev_get(dev);
1497 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001498 return -ENODEV;
1499
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001500 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1501 ret = -EBUSY;
1502 goto done;
1503 }
1504
Linus Torvalds1da177e2005-04-16 15:20:36 -07001505 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1506
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001507done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001508 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509 return ret;
1510}
1511
1512int hci_dev_cmd(unsigned int cmd, void __user *arg)
1513{
1514 struct hci_dev *hdev;
1515 struct hci_dev_req dr;
1516 int err = 0;
1517
1518 if (copy_from_user(&dr, arg, sizeof(dr)))
1519 return -EFAULT;
1520
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001521 hdev = hci_dev_get(dr.dev_id);
1522 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 return -ENODEV;
1524
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001525 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
1526 err = -EBUSY;
1527 goto done;
1528 }
1529
Johan Hedberg56f87902013-10-02 13:43:13 +03001530 if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
1531 err = -EOPNOTSUPP;
1532 goto done;
1533 }
1534
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 switch (cmd) {
1536 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001537 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1538 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001539 break;
1540
1541 case HCISETENCRYPT:
1542 if (!lmp_encrypt_capable(hdev)) {
1543 err = -EOPNOTSUPP;
1544 break;
1545 }
1546
1547 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1548 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001549 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1550 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551 if (err)
1552 break;
1553 }
1554
Johan Hedberg01178cd2013-03-05 20:37:41 +02001555 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1556 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557 break;
1558
1559 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001560 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1561 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 break;
1563
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001564 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001565 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1566 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001567 break;
1568
1569 case HCISETLINKMODE:
1570 hdev->link_mode = ((__u16) dr.dev_opt) &
1571 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1572 break;
1573
Linus Torvalds1da177e2005-04-16 15:20:36 -07001574 case HCISETPTYPE:
1575 hdev->pkt_type = (__u16) dr.dev_opt;
1576 break;
1577
Linus Torvalds1da177e2005-04-16 15:20:36 -07001578 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001579 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1580 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581 break;
1582
1583 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001584 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1585 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 break;
1587
1588 default:
1589 err = -EINVAL;
1590 break;
1591 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001592
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001593done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001594 hci_dev_put(hdev);
1595 return err;
1596}
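/* Hedged usage sketch (assumes a raw HCI control socket "ctl" as in the
 * example after hci_dev_open() above): each HCISET* ioctl handled by
 * hci_dev_cmd() takes a struct hci_dev_req whose dev_opt field carries
 * the argument. Making hci0 connectable and discoverable:
 */
#if 0 /* userspace example */
	struct hci_dev_req dr;

	dr.dev_id  = 0;				/* hci0 */
	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;

	/* Dispatched to hci_scan_req() by the switch above */
	if (ioctl(ctl, HCISETSCAN, &dr) < 0)
		perror("HCISETSCAN");
#endif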
1597
1598int hci_get_dev_list(void __user *arg)
1599{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001600 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001601 struct hci_dev_list_req *dl;
1602 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001603 int n = 0, size, err;
1604 __u16 dev_num;
1605
1606 if (get_user(dev_num, (__u16 __user *) arg))
1607 return -EFAULT;
1608
1609 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1610 return -EINVAL;
1611
1612 size = sizeof(*dl) + dev_num * sizeof(*dr);
1613
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001614 dl = kzalloc(size, GFP_KERNEL);
1615 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001616 return -ENOMEM;
1617
1618 dr = dl->dev_req;
1619
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001620 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001621 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001622 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001623 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001624
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001625 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1626 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001627
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 (dr + n)->dev_id = hdev->id;
1629 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001630
Linus Torvalds1da177e2005-04-16 15:20:36 -07001631 if (++n >= dev_num)
1632 break;
1633 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001634 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001635
1636 dl->dev_num = n;
1637 size = sizeof(*dl) + n * sizeof(*dr);
1638
1639 err = copy_to_user(arg, dl, size);
1640 kfree(dl);
1641
1642 return err ? -EFAULT : 0;
1643}
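/* Matching userspace sketch (an assumption for clarity; it mirrors the
 * sizing logic above and assumes <stdio.h>/<stdlib.h> plus the "ctl"
 * socket from the earlier example): the caller allocates the header and
 * dev_num request slots in one buffer, then reads back the filled
 * entries.
 */
#if 0 /* userspace example */
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int i;

	dl = malloc(sizeof(*dl) + HCI_MAX_DEV * sizeof(*dr));
	if (!dl)
		return -1;

	dl->dev_num = HCI_MAX_DEV;
	dr = dl->dev_req;

	if (ioctl(ctl, HCIGETDEVLIST, (void *) dl) == 0) {
		for (i = 0; i < dl->dev_num; i++)
			printf("hci%u flags 0x%x\n",
			       dr[i].dev_id, dr[i].dev_opt);
	}

	free(dl);
#endif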
1644
1645int hci_get_dev_info(void __user *arg)
1646{
1647 struct hci_dev *hdev;
1648 struct hci_dev_info di;
1649 int err = 0;
1650
1651 if (copy_from_user(&di, arg, sizeof(di)))
1652 return -EFAULT;
1653
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001654 hdev = hci_dev_get(di.dev_id);
1655 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656 return -ENODEV;
1657
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001658 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001659 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001660
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001661 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1662 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001663
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664 strcpy(di.name, hdev->name);
1665 di.bdaddr = hdev->bdaddr;
Marcel Holtmann60f2a3e2013-10-01 22:59:20 -07001666 di.type = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001667 di.flags = hdev->flags;
1668 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001669 if (lmp_bredr_capable(hdev)) {
1670 di.acl_mtu = hdev->acl_mtu;
1671 di.acl_pkts = hdev->acl_pkts;
1672 di.sco_mtu = hdev->sco_mtu;
1673 di.sco_pkts = hdev->sco_pkts;
1674 } else {
1675 di.acl_mtu = hdev->le_mtu;
1676 di.acl_pkts = hdev->le_pkts;
1677 di.sco_mtu = 0;
1678 di.sco_pkts = 0;
1679 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001680 di.link_policy = hdev->link_policy;
1681 di.link_mode = hdev->link_mode;
1682
1683 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1684 memcpy(&di.features, &hdev->features, sizeof(di.features));
1685
1686 if (copy_to_user(arg, &di, sizeof(di)))
1687 err = -EFAULT;
1688
1689 hci_dev_put(hdev);
1690
1691 return err;
1692}
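/* Companion HCIGETDEVINFO sketch (hedged; ba2str() is the BlueZ
 * hci_lib.h helper): userspace fills in dev_id and the kernel copies
 * back the rest of struct hci_dev_info as populated above.
 */
#if 0 /* userspace example */
	struct hci_dev_info di = { .dev_id = 0 };
	char addr[18];

	if (ioctl(ctl, HCIGETDEVINFO, (void *) &di) == 0) {
		ba2str(&di.bdaddr, addr);
		printf("%s %s acl_mtu %d\n", di.name, addr, di.acl_mtu);
	}
#endif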
1693
1694/* ---- Interface to HCI drivers ---- */
1695
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001696static int hci_rfkill_set_block(void *data, bool blocked)
1697{
1698 struct hci_dev *hdev = data;
1699
1700 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1701
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07001702 if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
1703 return -EBUSY;
1704
Johan Hedberg5e130362013-09-13 08:58:17 +03001705 if (blocked) {
1706 set_bit(HCI_RFKILLED, &hdev->dev_flags);
Johan Hedbergbf543032013-09-13 08:58:18 +03001707 if (!test_bit(HCI_SETUP, &hdev->dev_flags))
1708 hci_dev_do_close(hdev);
Johan Hedberg5e130362013-09-13 08:58:17 +03001709 } else {
1710 clear_bit(HCI_RFKILLED, &hdev->dev_flags);
Gustavo Padovan1025c042013-09-27 11:56:14 -03001711 }
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001712
1713 return 0;
1714}
1715
1716static const struct rfkill_ops hci_rfkill_ops = {
1717 .set_block = hci_rfkill_set_block,
1718};
1719
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001720static void hci_power_on(struct work_struct *work)
1721{
1722 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001723 int err;
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001724
1725 BT_DBG("%s", hdev->name);
1726
Johan Hedbergcbed0ca2013-10-01 22:44:49 +03001727 err = hci_dev_do_open(hdev);
Johan Hedberg96570ff2013-05-29 09:51:29 +03001728 if (err < 0) {
1729 mgmt_set_powered_failed(hdev, err);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001730 return;
Johan Hedberg96570ff2013-05-29 09:51:29 +03001731 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001732
Marcel Holtmanna5c8f272013-10-06 01:08:57 -07001733 /* During the HCI setup phase, a few error conditions are
1734 * ignored and they need to be checked now. If they are still
1735 * valid, it is important to turn the device back off.
1736 */
1737 if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
1738 (hdev->dev_type == HCI_BREDR &&
1739 !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
1740 !bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedbergbf543032013-09-13 08:58:18 +03001741 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1742 hci_dev_do_close(hdev);
1743 } else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
Johan Hedberg19202572013-01-14 22:33:51 +02001744 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1745 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergbf543032013-09-13 08:58:18 +03001746 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001747
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001748 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001749 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001750}
1751
1752static void hci_power_off(struct work_struct *work)
1753{
Johan Hedberg32435532011-11-07 22:16:04 +02001754 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001755 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001756
1757 BT_DBG("%s", hdev->name);
1758
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001759 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001760}
1761
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001762static void hci_discov_off(struct work_struct *work)
1763{
1764 struct hci_dev *hdev;
1765 u8 scan = SCAN_PAGE;
1766
1767 hdev = container_of(work, struct hci_dev, discov_off.work);
1768
1769 BT_DBG("%s", hdev->name);
1770
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001771 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001772
1773 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1774
1775 hdev->discov_timeout = 0;
1776
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001777 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001778}
1779
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001780int hci_uuids_clear(struct hci_dev *hdev)
1781{
Johan Hedberg48210022013-01-27 00:31:28 +02001782 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001783
Johan Hedberg48210022013-01-27 00:31:28 +02001784 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1785 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001786 kfree(uuid);
1787 }
1788
1789 return 0;
1790}
1791
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001792int hci_link_keys_clear(struct hci_dev *hdev)
1793{
1794 struct list_head *p, *n;
1795
1796 list_for_each_safe(p, n, &hdev->link_keys) {
1797 struct link_key *key;
1798
1799 key = list_entry(p, struct link_key, list);
1800
1801 list_del(p);
1802 kfree(key);
1803 }
1804
1805 return 0;
1806}
1807
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001808int hci_smp_ltks_clear(struct hci_dev *hdev)
1809{
1810 struct smp_ltk *k, *tmp;
1811
1812 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1813 list_del(&k->list);
1814 kfree(k);
1815 }
1816
1817 return 0;
1818}
1819
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001820struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1821{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001822 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001823
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001824 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001825 if (bacmp(bdaddr, &k->bdaddr) == 0)
1826 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001827
1828 return NULL;
1829}
1830
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301831static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001832 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001833{
1834 /* Legacy key */
1835 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301836 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001837
1838 /* Debug keys are insecure so don't store them persistently */
1839 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301840 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001841
1842 /* Changed combination key and there's no previous one */
1843 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301844 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001845
1846 /* Security mode 3 case */
1847 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301848 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001849
1850 /* Neither local nor remote side had no-bonding as requirement */
1851 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301852 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001853
1854 /* Local side had dedicated bonding as requirement */
1855 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301856 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001857
1858 /* Remote side had dedicated bonding as requirement */
1859 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301860 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001861
1862 /* If none of the above criteria match, then don't store the key
1863 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301864 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001865}
1866
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001867struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001868{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001869 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001870
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001871 list_for_each_entry(k, &hdev->long_term_keys, list) {
1872 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001873 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001874 continue;
1875
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001876 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001877 }
1878
1879 return NULL;
1880}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001881
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001882struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001883 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001884{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001885 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001886
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001887 list_for_each_entry(k, &hdev->long_term_keys, list)
1888 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001889 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001890 return k;
1891
1892 return NULL;
1893}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001894
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001895int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001896 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001897{
1898 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301899 u8 old_key_type;
1900 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001901
1902 old_key = hci_find_link_key(hdev, bdaddr);
1903 if (old_key) {
1904 old_key_type = old_key->type;
1905 key = old_key;
1906 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001907 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001908 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1909 if (!key)
1910 return -ENOMEM;
1911 list_add(&key->list, &hdev->link_keys);
1912 }
1913
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001914 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001915
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001916 /* Some buggy controller combinations generate a changed
1917 * combination key for legacy pairing even when there's no
1918 * previous key */
1919 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001920 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001921 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001922 if (conn)
1923 conn->key_type = type;
1924 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001925
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001926 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001927 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001928 key->pin_len = pin_len;
1929
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001930 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001931 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001932 else
1933 key->type = type;
1934
Johan Hedberg4df378a2011-04-28 11:29:03 -07001935 if (!new_key)
1936 return 0;
1937
1938 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1939
Johan Hedberg744cf192011-11-08 20:40:14 +02001940 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001941
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301942 if (conn)
1943 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001944
1945 return 0;
1946}
1947
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001948int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001949 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001950 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001951{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001952 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001953
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001954 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1955 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001956
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001957 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1958 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001959 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001960 else {
1961 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001962 if (!key)
1963 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001964 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001965 }
1966
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001967 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001968 key->bdaddr_type = addr_type;
1969 memcpy(key->val, tk, sizeof(key->val));
1970 key->authenticated = authenticated;
1971 key->ediv = ediv;
1972 key->enc_size = enc_size;
1973 key->type = type;
1974 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001975
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001976 if (!new_key)
1977 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001978
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001979 if (type & HCI_SMP_LTK)
1980 mgmt_new_ltk(hdev, key, 1);
1981
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001982 return 0;
1983}
1984
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001985int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1986{
1987 struct link_key *key;
1988
1989 key = hci_find_link_key(hdev, bdaddr);
1990 if (!key)
1991 return -ENOENT;
1992
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001993 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001994
1995 list_del(&key->list);
1996 kfree(key);
1997
1998 return 0;
1999}
2000
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002001int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
2002{
2003 struct smp_ltk *k, *tmp;
2004
2005 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
2006 if (bacmp(bdaddr, &k->bdaddr))
2007 continue;
2008
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002009 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002010
2011 list_del(&k->list);
2012 kfree(k);
2013 }
2014
2015 return 0;
2016}
2017
Ville Tervo6bd32322011-02-16 16:32:41 +02002018/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002019static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02002020{
2021 struct hci_dev *hdev = (void *) arg;
2022
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002023 if (hdev->sent_cmd) {
2024 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
2025 u16 opcode = __le16_to_cpu(sent->opcode);
2026
2027 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
2028 } else {
2029 BT_ERR("%s command tx timeout", hdev->name);
2030 }
2031
Ville Tervo6bd32322011-02-16 16:32:41 +02002032 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002033 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02002034}
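/* Context note (a hedged summary of the command TX path later in this
 * file): hci_cmd_work() arms this watchdog for every command it hands
 * to the driver, roughly
 *
 *	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
 *
 * so hci_cmd_timeout() fires only when the controller never answers
 * with a Command Complete/Status event, and resetting cmd_cnt here
 * unwedges the command queue.
 */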
2035
Szymon Janc2763eda2011-03-22 13:12:22 +01002036struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002037 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01002038{
2039 struct oob_data *data;
2040
2041 list_for_each_entry(data, &hdev->remote_oob_data, list)
2042 if (bacmp(bdaddr, &data->bdaddr) == 0)
2043 return data;
2044
2045 return NULL;
2046}
2047
2048int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
2049{
2050 struct oob_data *data;
2051
2052 data = hci_find_remote_oob_data(hdev, bdaddr);
2053 if (!data)
2054 return -ENOENT;
2055
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002056 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002057
2058 list_del(&data->list);
2059 kfree(data);
2060
2061 return 0;
2062}
2063
2064int hci_remote_oob_data_clear(struct hci_dev *hdev)
2065{
2066 struct oob_data *data, *n;
2067
2068 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
2069 list_del(&data->list);
2070 kfree(data);
2071 }
2072
2073 return 0;
2074}
2075
2076int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002077 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01002078{
2079 struct oob_data *data;
2080
2081 data = hci_find_remote_oob_data(hdev, bdaddr);
2082
2083 if (!data) {
2084 data = kmalloc(sizeof(*data), GFP_ATOMIC);
2085 if (!data)
2086 return -ENOMEM;
2087
2088 bacpy(&data->bdaddr, bdaddr);
2089 list_add(&data->list, &hdev->remote_oob_data);
2090 }
2091
2092 memcpy(data->hash, hash, sizeof(data->hash));
2093 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
2094
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002095 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01002096
2097 return 0;
2098}
2099
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002100struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002101{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002102 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002103
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002104 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002105 if (bacmp(bdaddr, &b->bdaddr) == 0)
2106 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002107
2108 return NULL;
2109}
2110
2111int hci_blacklist_clear(struct hci_dev *hdev)
2112{
2113 struct list_head *p, *n;
2114
2115 list_for_each_safe(p, n, &hdev->blacklist) {
2116 struct bdaddr_list *b;
2117
2118 b = list_entry(p, struct bdaddr_list, list);
2119
2120 list_del(p);
2121 kfree(b);
2122 }
2123
2124 return 0;
2125}
2126
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002127int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002128{
2129 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002130
2131 if (bacmp(bdaddr, BDADDR_ANY) == 0)
2132 return -EBADF;
2133
Antti Julku5e762442011-08-25 16:48:02 +03002134 if (hci_blacklist_lookup(hdev, bdaddr))
2135 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002136
2137 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03002138 if (!entry)
2139 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002140
2141 bacpy(&entry->bdaddr, bdaddr);
2142
2143 list_add(&entry->list, &hdev->blacklist);
2144
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002145 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002146}
2147
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002148int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03002149{
2150 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002151
Szymon Janc1ec918c2011-11-16 09:32:21 +01002152 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03002153 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002154
2155 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01002156 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03002157 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03002158
2159 list_del(&entry->list);
2160 kfree(entry);
2161
Johan Hedberg88c1fe42012-02-09 15:56:11 +02002162 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03002163}
2164
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002165static void inquiry_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002166{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002167 if (status) {
2168 BT_ERR("Failed to start inquiry: status %d", status);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002169
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002170 hci_dev_lock(hdev);
2171 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2172 hci_dev_unlock(hdev);
2173 return;
2174 }
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002175}
2176
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002177static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002178{
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002179 /* General inquiry access code (GIAC) */
2180 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2181 struct hci_request req;
2182 struct hci_cp_inquiry cp;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002183 int err;
2184
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002185 if (status) {
2186 BT_ERR("Failed to disable LE scanning: status %d", status);
2187 return;
Andre Guedes7dbfac12012-03-15 16:52:07 -03002188 }
2189
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002190 switch (hdev->discovery.type) {
2191 case DISCOV_TYPE_LE:
2192 hci_dev_lock(hdev);
2193 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2194 hci_dev_unlock(hdev);
2195 break;
2196
2197 case DISCOV_TYPE_INTERLEAVED:
2198 hci_req_init(&req, hdev);
2199
2200 memset(&cp, 0, sizeof(cp));
2201 memcpy(&cp.lap, lap, sizeof(cp.lap));
2202 cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
2203 hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2204
2205 hci_dev_lock(hdev);
2206
2207 hci_inquiry_cache_flush(hdev);
2208
2209 err = hci_req_run(&req, inquiry_complete);
2210 if (err) {
2211 BT_ERR("Inquiry request failed: err %d", err);
2212 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2213 }
2214
2215 hci_dev_unlock(hdev);
2216 break;
2217 }
Andre Guedes7dbfac12012-03-15 16:52:07 -03002218}
2219
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002220static void le_scan_disable_work(struct work_struct *work)
2221{
2222 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002223 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002224 struct hci_cp_le_set_scan_enable cp;
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002225 struct hci_request req;
2226 int err;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002227
2228 BT_DBG("%s", hdev->name);
2229
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002230 hci_req_init(&req, hdev);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002231
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002232 memset(&cp, 0, sizeof(cp));
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002233 cp.enable = LE_SCAN_DISABLE;
2234 hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002235
Andre Guedes4c87eaa2013-04-30 15:29:32 -03002236 err = hci_req_run(&req, le_scan_disable_work_complete);
2237 if (err)
2238 BT_ERR("Disable LE scanning request failed: err %d", err);
Andre Guedes28b75a82012-02-03 17:48:00 -03002239}
2240
David Herrmann9be0dab2012-04-22 14:39:57 +02002241/* Alloc HCI device */
2242struct hci_dev *hci_alloc_dev(void)
2243{
2244 struct hci_dev *hdev;
2245
2246 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2247 if (!hdev)
2248 return NULL;
2249
David Herrmannb1b813d2012-04-22 14:39:58 +02002250 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2251 hdev->esco_type = (ESCO_HV1);
2252 hdev->link_mode = (HCI_LM_ACCEPT);
2253 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002254 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2255 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002256
David Herrmannb1b813d2012-04-22 14:39:58 +02002257 hdev->sniff_max_interval = 800;
2258 hdev->sniff_min_interval = 80;
2259
2260 mutex_init(&hdev->lock);
2261 mutex_init(&hdev->req_lock);
2262
2263 INIT_LIST_HEAD(&hdev->mgmt_pending);
2264 INIT_LIST_HEAD(&hdev->blacklist);
2265 INIT_LIST_HEAD(&hdev->uuids);
2266 INIT_LIST_HEAD(&hdev->link_keys);
2267 INIT_LIST_HEAD(&hdev->long_term_keys);
2268 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002269 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002270
2271 INIT_WORK(&hdev->rx_work, hci_rx_work);
2272 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2273 INIT_WORK(&hdev->tx_work, hci_tx_work);
2274 INIT_WORK(&hdev->power_on, hci_power_on);
David Herrmannb1b813d2012-04-22 14:39:58 +02002275
David Herrmannb1b813d2012-04-22 14:39:58 +02002276 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2277 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2278 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2279
David Herrmannb1b813d2012-04-22 14:39:58 +02002280 skb_queue_head_init(&hdev->rx_q);
2281 skb_queue_head_init(&hdev->cmd_q);
2282 skb_queue_head_init(&hdev->raw_q);
2283
2284 init_waitqueue_head(&hdev->req_wait_q);
2285
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002286 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002287
David Herrmannb1b813d2012-04-22 14:39:58 +02002288 hci_init_sysfs(hdev);
2289 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002290
2291 return hdev;
2292}
2293EXPORT_SYMBOL(hci_alloc_dev);
2294
2295/* Free HCI device */
2296void hci_free_dev(struct hci_dev *hdev)
2297{
David Herrmann9be0dab2012-04-22 14:39:57 +02002298 /* will free via device release */
2299 put_device(&hdev->dev);
2300}
2301EXPORT_SYMBOL(hci_free_dev);
2302
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303/* Register HCI device */
2304int hci_register_dev(struct hci_dev *hdev)
2305{
David Herrmannb1b813d2012-04-22 14:39:58 +02002306 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307
David Herrmann010666a2012-01-07 15:47:07 +01002308 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309 return -EINVAL;
2310
Mat Martineau08add512011-11-02 16:18:36 -07002311 /* Do not allow HCI_AMP devices to register at index 0,
2312 * so the index can be used as the AMP controller ID.
2313 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002314 switch (hdev->dev_type) {
2315 case HCI_BREDR:
2316 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2317 break;
2318 case HCI_AMP:
2319 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2320 break;
2321 default:
2322 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002324
Sasha Levin3df92b32012-05-27 22:36:56 +02002325 if (id < 0)
2326 return id;
2327
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328 sprintf(hdev->name, "hci%d", id);
2329 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002330
2331 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2332
Kees Cookd8537542013-07-03 15:04:57 -07002333 hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2334 WQ_MEM_RECLAIM, 1, hdev->name);
David Herrmann33ca9542011-10-08 14:58:49 +02002335 if (!hdev->workqueue) {
2336 error = -ENOMEM;
2337 goto err;
2338 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002339
Kees Cookd8537542013-07-03 15:04:57 -07002340 hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
2341 WQ_MEM_RECLAIM, 1, hdev->name);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002342 if (!hdev->req_workqueue) {
2343 destroy_workqueue(hdev->workqueue);
2344 error = -ENOMEM;
2345 goto err;
2346 }
2347
David Herrmann33ca9542011-10-08 14:58:49 +02002348 error = hci_add_sysfs(hdev);
2349 if (error < 0)
2350 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002351
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002352 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002353 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2354 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002355 if (hdev->rfkill) {
2356 if (rfkill_register(hdev->rfkill) < 0) {
2357 rfkill_destroy(hdev->rfkill);
2358 hdev->rfkill = NULL;
2359 }
2360 }
2361
Johan Hedberg5e130362013-09-13 08:58:17 +03002362 if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
2363 set_bit(HCI_RFKILLED, &hdev->dev_flags);
2364
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002365 set_bit(HCI_SETUP, &hdev->dev_flags);
Marcel Holtmann004b0252013-10-07 00:58:32 -07002366 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002367
Marcel Holtmann01cd3402013-10-06 01:16:22 -07002368 if (hdev->dev_type == HCI_BREDR) {
Johan Hedberg56f87902013-10-02 13:43:13 +03002369 /* Assume BR/EDR support until proven otherwise (such as
2370 * through reading supported features during init.
2371 */
2372 set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
2373 }
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002374
Gustavo Padovanfcee3372013-07-11 11:34:28 +01002375 write_lock(&hci_dev_list_lock);
2376 list_add(&hdev->list, &hci_dev_list);
2377 write_unlock(&hci_dev_list_lock);
2378
Linus Torvalds1da177e2005-04-16 15:20:36 -07002379 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002380 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381
Johan Hedberg19202572013-01-14 22:33:51 +02002382 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002383
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002385
David Herrmann33ca9542011-10-08 14:58:49 +02002386err_wqueue:
2387 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002388 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002389err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002390 ida_simple_remove(&hci_index_ida, hdev->id);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002391
David Herrmann33ca9542011-10-08 14:58:49 +02002392 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393}
2394EXPORT_SYMBOL(hci_register_dev);
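/* Hedged driver-side sketch (modelled on typical HCI transport drivers
 * of this era; all names are invented for the example): allocate the
 * device, wire up the mandatory open/close plus the send callback used
 * by hci_send_frame(), then register.
 */
#if 0 /* driver example */
static int my_open(struct hci_dev *hdev)
{
	return 0;
}

static int my_close(struct hci_dev *hdev)
{
	return 0;
}

static int my_send(struct sk_buff *skb)
{
	/* In this kernel the driver recovers hdev from skb->dev */
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	BT_DBG("%s type %d", hdev->name, bt_cb(skb)->pkt_type);

	/* A real transport would queue the skb for transmission here */
	kfree_skb(skb);
	return 0;
}

static int my_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus = HCI_USB;
	hdev->dev_type = HCI_BREDR;
	hdev->open  = my_open;
	hdev->close = my_close;
	hdev->send  = my_send;

	err = hci_register_dev(hdev);
	if (err < 0)
		hci_free_dev(hdev);

	return err;
}
#endif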
2395
2396/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002397void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398{
Sasha Levin3df92b32012-05-27 22:36:56 +02002399 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002400
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002401 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402
Johan Hovold94324962012-03-15 14:48:41 +01002403 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2404
Sasha Levin3df92b32012-05-27 22:36:56 +02002405 id = hdev->id;
2406
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002407 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002408 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002409 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002410
2411 hci_dev_do_close(hdev);
2412
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302413 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002414 kfree_skb(hdev->reassembly[i]);
2415
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002416 cancel_work_sync(&hdev->power_on);
2417
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002418 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002419 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002420 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002421 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002422 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002423 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002424
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002425 /* mgmt_index_removed should take care of emptying the
2426 * pending list */
2427 BUG_ON(!list_empty(&hdev->mgmt_pending));
2428
Linus Torvalds1da177e2005-04-16 15:20:36 -07002429 hci_notify(hdev, HCI_DEV_UNREG);
2430
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002431 if (hdev->rfkill) {
2432 rfkill_unregister(hdev->rfkill);
2433 rfkill_destroy(hdev->rfkill);
2434 }
2435
David Herrmannce242972011-10-08 14:58:48 +02002436 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002437
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002438 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002439 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002440
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002441 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002442 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002443 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002444 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002445 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002446 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002447 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002448
David Herrmanndc946bd2012-01-07 15:47:24 +01002449 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002450
2451 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452}
2453EXPORT_SYMBOL(hci_unregister_dev);
2454
2455/* Suspend HCI device */
2456int hci_suspend_dev(struct hci_dev *hdev)
2457{
2458 hci_notify(hdev, HCI_DEV_SUSPEND);
2459 return 0;
2460}
2461EXPORT_SYMBOL(hci_suspend_dev);
2462
2463/* Resume HCI device */
2464int hci_resume_dev(struct hci_dev *hdev)
2465{
2466 hci_notify(hdev, HCI_DEV_RESUME);
2467 return 0;
2468}
2469EXPORT_SYMBOL(hci_resume_dev);
2470
Marcel Holtmann76bca882009-11-18 00:40:39 +01002471/* Receive frame from HCI drivers */
2472int hci_recv_frame(struct sk_buff *skb)
2473{
2474 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2475 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002476 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002477 kfree_skb(skb);
2478 return -ENXIO;
2479 }
2480
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002481 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002482 bt_cb(skb)->incoming = 1;
2483
2484 /* Time stamp */
2485 __net_timestamp(skb);
2486
Marcel Holtmann76bca882009-11-18 00:40:39 +01002487 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002488 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002489
Marcel Holtmann76bca882009-11-18 00:40:39 +01002490 return 0;
2491}
2492EXPORT_SYMBOL(hci_recv_frame);
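/* Driver-side sketch (an assumption): a transport that already holds a
 * complete packet tags it exactly as the checks above expect (skb->dev
 * pointing at the hci_dev, pkt_type set) before queueing it.
 */
#if 0 /* driver example */
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(skb);	/* queues to rx_q, kicks rx_work */
#endif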
2493
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302494static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002495 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302496{
2497 int len = 0;
2498 int hlen = 0;
2499 int remain = count;
2500 struct sk_buff *skb;
2501 struct bt_skb_cb *scb;
2502
2503 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002504 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302505 return -EILSEQ;
2506
2507 skb = hdev->reassembly[index];
2508
2509 if (!skb) {
2510 switch (type) {
2511 case HCI_ACLDATA_PKT:
2512 len = HCI_MAX_FRAME_SIZE;
2513 hlen = HCI_ACL_HDR_SIZE;
2514 break;
2515 case HCI_EVENT_PKT:
2516 len = HCI_MAX_EVENT_SIZE;
2517 hlen = HCI_EVENT_HDR_SIZE;
2518 break;
2519 case HCI_SCODATA_PKT:
2520 len = HCI_MAX_SCO_SIZE;
2521 hlen = HCI_SCO_HDR_SIZE;
2522 break;
2523 }
2524
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002525 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302526 if (!skb)
2527 return -ENOMEM;
2528
2529 scb = (void *) skb->cb;
2530 scb->expect = hlen;
2531 scb->pkt_type = type;
2532
2533 skb->dev = (void *) hdev;
2534 hdev->reassembly[index] = skb;
2535 }
2536
2537 while (count) {
2538 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002539 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302540
2541 memcpy(skb_put(skb, len), data, len);
2542
2543 count -= len;
2544 data += len;
2545 scb->expect -= len;
2546 remain = count;
2547
2548 switch (type) {
2549 case HCI_EVENT_PKT:
2550 if (skb->len == HCI_EVENT_HDR_SIZE) {
2551 struct hci_event_hdr *h = hci_event_hdr(skb);
2552 scb->expect = h->plen;
2553
2554 if (skb_tailroom(skb) < scb->expect) {
2555 kfree_skb(skb);
2556 hdev->reassembly[index] = NULL;
2557 return -ENOMEM;
2558 }
2559 }
2560 break;
2561
2562 case HCI_ACLDATA_PKT:
2563 if (skb->len == HCI_ACL_HDR_SIZE) {
2564 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2565 scb->expect = __le16_to_cpu(h->dlen);
2566
2567 if (skb_tailroom(skb) < scb->expect) {
2568 kfree_skb(skb);
2569 hdev->reassembly[index] = NULL;
2570 return -ENOMEM;
2571 }
2572 }
2573 break;
2574
2575 case HCI_SCODATA_PKT:
2576 if (skb->len == HCI_SCO_HDR_SIZE) {
2577 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2578 scb->expect = h->dlen;
2579
2580 if (skb_tailroom(skb) < scb->expect) {
2581 kfree_skb(skb);
2582 hdev->reassembly[index] = NULL;
2583 return -ENOMEM;
2584 }
2585 }
2586 break;
2587 }
2588
2589 if (scb->expect == 0) {
2590 /* Complete frame */
2591
2592 bt_cb(skb)->pkt_type = type;
2593 hci_recv_frame(skb);
2594
2595 hdev->reassembly[index] = NULL;
2596 return remain;
2597 }
2598 }
2599
2600 return remain;
2601}
2602
Marcel Holtmannef222012007-07-11 06:42:04 +02002603int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2604{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302605 int rem = 0;
2606
Marcel Holtmannef222012007-07-11 06:42:04 +02002607 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2608 return -EILSEQ;
2609
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002610 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002611 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302612 if (rem < 0)
2613 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002614
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302615 data += (count - rem);
2616 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002617 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002618
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302619 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002620}
2621EXPORT_SYMBOL(hci_recv_fragment);
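/* Usage sketch (hedged, modelled on USB-style drivers where the endpoint
 * implies the packet type): arbitrary-sized chunks go straight to
 * hci_recv_fragment() and the reassembly code above emits complete
 * frames once enough bytes have arrived.
 */
#if 0 /* driver example: interrupt endpoint carrying HCI events */
	err = hci_recv_fragment(hdev, HCI_EVENT_PKT,
				urb->transfer_buffer, urb->actual_length);
	if (err < 0)
		BT_ERR("%s corrupted event packet", hdev->name);
#endif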
2622
Suraj Sumangala99811512010-07-14 13:02:19 +05302623#define STREAM_REASSEMBLY 0
2624
2625int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2626{
2627 int type;
2628 int rem = 0;
2629
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002630 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302631 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2632
2633 if (!skb) {
2634 struct { char type; } *pkt;
2635
2636 /* Start of the frame */
2637 pkt = data;
2638 type = pkt->type;
2639
2640 data++;
2641 count--;
2642 } else
2643 type = bt_cb(skb)->pkt_type;
2644
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002645 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002646 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302647 if (rem < 0)
2648 return rem;
2649
2650 data += (count - rem);
2651 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002652 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302653
2654 return rem;
2655}
2656EXPORT_SYMBOL(hci_recv_stream_fragment);
2657
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658/* ---- Interface to upper protocols ---- */
2659
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660int hci_register_cb(struct hci_cb *cb)
2661{
2662 BT_DBG("%p name %s", cb, cb->name);
2663
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002664 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002666 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667
2668 return 0;
2669}
2670EXPORT_SYMBOL(hci_register_cb);
2671
2672int hci_unregister_cb(struct hci_cb *cb)
2673{
2674 BT_DBG("%p name %s", cb, cb->name);
2675
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002676 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002678 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679
2680 return 0;
2681}
2682EXPORT_SYMBOL(hci_unregister_cb);
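/* Upper-protocol sketch (hedged; the callback name follows this
 * kernel's struct hci_cb, everything else is invented): L2CAP and SCO
 * register a struct hci_cb like this to hear about security changes on
 * existing connections.
 */
#if 0 /* protocol example */
static void my_proto_security_cfm(struct hci_conn *conn, __u8 status,
				  __u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt 0x%2.2x",
	       conn, status, encrypt);
}

static struct hci_cb my_proto_cb = {
	.name		= "my_proto",
	.security_cfm	= my_proto_security_cfm,
};

static int __init my_proto_init(void)
{
	return hci_register_cb(&my_proto_cb);
}

static void __exit my_proto_exit(void)
{
	hci_unregister_cb(&my_proto_cb);
}
#endif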
2683
2684static int hci_send_frame(struct sk_buff *skb)
2685{
2686 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2687
2688 if (!hdev) {
2689 kfree_skb(skb);
2690 return -ENODEV;
2691 }
2692
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002693 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002695 /* Time stamp */
2696 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002698 /* Send copy to monitor */
2699 hci_send_to_monitor(hdev, skb);
2700
2701 if (atomic_read(&hdev->promisc)) {
2702 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002703 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 }
2705
2706 /* Get rid of skb owner, prior to sending to the driver. */
2707 skb_orphan(skb);
2708
2709 return hdev->send(skb);
2710}
2711
Johan Hedberg3119ae92013-03-05 20:37:44 +02002712void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2713{
2714 skb_queue_head_init(&req->cmd_q);
2715 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002716 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002717}
2718
2719int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2720{
2721 struct hci_dev *hdev = req->hdev;
2722 struct sk_buff *skb;
2723 unsigned long flags;
2724
2725 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2726
Andre Guedes5d73e032013-03-08 11:20:16 -03002727 /* If an error occurred during request building, remove all HCI
2728 * commands queued on the HCI request queue.
2729 */
2730 if (req->err) {
2731 skb_queue_purge(&req->cmd_q);
2732 return req->err;
2733 }
2734
Johan Hedberg3119ae92013-03-05 20:37:44 +02002735 /* Do not allow empty requests */
2736 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002737 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002738
2739 skb = skb_peek_tail(&req->cmd_q);
2740 bt_cb(skb)->req.complete = complete;
2741
2742 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2743 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2744 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2745
2746 queue_work(hdev->workqueue, &hdev->cmd_work);
2747
2748 return 0;
2749}
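/* Minimal request sketch (an assumption, though the same pattern appears
 * in le_scan_disable_work() above): queue commands on a struct
 * hci_request, then run them with one completion callback for the whole
 * batch.
 */
#if 0 /* example */
static void scan_enable_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int write_scan_enable(struct hci_dev *hdev, u8 scan)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);

	return hci_req_run(&req, scan_enable_complete);
}
#endif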

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
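
/*
 * Usage sketch (editor's illustrative example): queueing a parameterless
 * HCI_OP_RESET command. HCI_OP_RESET is referenced elsewhere in this file;
 * any other opcode/parameter pair follows the same pattern.
 *
 *	int err;
 *
 *	err = hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *	if (err)
 *		BT_ERR("reset could not be queued (%d)", err);
 */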

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}
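
/*
 * Usage sketch (editor's illustrative example): building a two-command
 * asynchronous request and handing it to the command work queue. The
 * second opcode, its parameters, the expected event and the completion
 * callback name are hypothetical; the flow itself
 * (hci_req_init -> hci_req_add / hci_req_add_ev -> hci_req_run) is the
 * one implemented above.
 *
 *	static void example_req_complete(struct hci_dev *hdev, u8 status)
 *	{
 *		BT_DBG("%s status 0x%2.2x", hdev->name, status);
 *	}
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	hci_req_add_ev(&req, example_opcode, plen, param, example_event);
 *	err = hci_req_run(&req, example_req_complete);
 */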

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
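
/*
 * Usage sketch (editor's illustrative example): an event handler can
 * recover the parameters of the command it completes. The parameter
 * structure and opcode names are hypothetical; the returned pointer
 * addresses the payload right after the command header, as constructed
 * above.
 *
 *	struct hci_cp_example *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, example_opcode);
 *	if (!cp)
 *		return;
 *	(cp now points at the parameters that were sent)
 */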

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
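
/*
 * Usage sketch (editor's illustrative example): sending one SCO frame on an
 * established connection. The payload buffer and its length are
 * hypothetical; the SCO header is added by hci_send_sco() itself, so the
 * caller provides only the raw data.
 *
 *	struct sk_buff *skb;
 *
 *	skb = bt_skb_alloc(len, GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), data, len);
 *	hci_send_sco(conn, skb);
 */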

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
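
/*
 * Editor's note (illustrative): the quote computed above splits the
 * controller's free buffer count evenly across the busy connections of the
 * given type. For example, with hdev->acl_cnt == 10 and num == 4 busy ACL
 * connections, the selected connection may send roughly q = 10 / 4 = 2
 * packets per scheduling round, and never less than 1.
 */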

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
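
/*
 * Editor's note (illustrative, assuming the usual 4-byte ACL header for
 * HCI_ACL_HDR_SIZE): a 100-byte skb on a controller with a 48-byte
 * block_len occupies DIV_ROUND_UP(100 - 4, 48) = 2 controller blocks.
 */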

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		/* Schedule queues and send stuff to HCI driver */
		hci_sched_acl(hdev);
		hci_sched_sco(hdev);
		hci_sched_esco(hdev);
		hci_sched_le(hdev);
	}

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;

		if (req_complete) {
			/* We must set the complete callback to NULL to
			 * avoid calling the callback more than once if
			 * this function gets called again.
			 */
			bt_cb(hdev->sent_cmd)->req.complete = NULL;

			goto call_complete;
		}
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags) ||
		    test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}
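
/*
 * Usage sketch (editor's illustrative example): mapping a bdaddr address
 * type to the HCI-layer LE address type used internally.
 *
 *	u8 le_type;
 *
 *	le_type = bdaddr_to_le(BDADDR_LE_PUBLIC);
 *	(le_type is now ADDR_LE_DEV_PUBLIC; any other input falls back
 *	to ADDR_LE_DEV_RANDOM)
 */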