/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>
#include <linux/rfkill.h>
#include <linux/debugfs.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI debugfs entries ---- */

static int inquiry_cache_show(struct seq_file *f, void *p)
{
        struct hci_dev *hdev = f->private;
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        hci_dev_lock(hdev);

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;
                seq_printf(f, "%pMR %d %d %d 0x%.2x%.2x%.2x 0x%.4x %d %d %u\n",
                           &data->bdaddr,
                           data->pscan_rep_mode, data->pscan_period_mode,
                           data->pscan_mode, data->dev_class[2],
                           data->dev_class[1], data->dev_class[0],
                           __le16_to_cpu(data->clock_offset),
                           data->rssi, data->ssp_mode, e->timestamp);
        }

        hci_dev_unlock(hdev);

        return 0;
}

static int inquiry_cache_open(struct inode *inode, struct file *file)
{
        return single_open(file, inquiry_cache_show, inode->i_private);
}

static const struct file_operations inquiry_cache_fops = {
        .open           = inquiry_cache_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

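/* Example (added, illustrative): with debugfs mounted at its default
 * location, the entry created by __hci_init() further below can be read
 * from userspace as:
 *
 *      cat /sys/kernel/debug/bluetooth/hci0/inquiry_cache
 */
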
/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode,
                                            u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

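/* Example (added, illustrative): a vendor setup() callback could use
 * __hci_cmd_sync() to issue a single command and inspect the Command
 * Complete parameters. The opcode and timeout constants are real; the
 * surrounding code is only a sketch.
 *
 *      struct sk_buff *skb;
 *
 *      skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                           HCI_INIT_TIMEOUT);
 *      if (IS_ERR(skb))
 *              return PTR_ERR(skb);
 *
 *      ... parse skb->data as struct hci_rp_read_local_version ...
 *      kfree_skb(skb);
 */
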
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

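/* Example (added, illustrative): callers pass a builder callback that
 * queues commands on the request; the ioctl handlers use the pattern
 * roughly like this (hci_scan_req() is defined further below):
 *
 *      err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *                         HCI_INIT_TIMEOUT);
 */
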
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local Supported Commands */
        hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);

        /* Read Flow Control Mode */
        hci_req_add(req, HCI_OP_READ_FLOW_CONTROL_MODE, 0, NULL);

        /* Read Location Data */
        hci_req_add(req, HCI_OP_READ_LOCATION_DATA, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Read Number of Supported IAC */
        hci_req_add(req, HCI_OP_READ_NUM_SUPPORTED_IAC, 0, NULL);

        /* Read Current IAC LAP */
        hci_req_add(req, HCI_OP_READ_CURRENT_IAC_LAP, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* AVM Berlin (31), aka "BlueFRITZ!", reports version 1.2,
         * but it does not support page scan related HCI commands.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

        /* LE-only controllers have LE implicitly enabled */
        if (!lmp_bredr_capable(hdev))
                set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

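/* Note (added for clarity): the values returned above are the parameter
 * values of the Write Inquiry Mode command: 0x00 standard inquiry
 * results, 0x01 results with RSSI, 0x02 extended inquiry results. The
 * manufacturer/revision checks whitelist controllers known to handle
 * RSSI inquiry despite not advertising it in their LMP features.
 */
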
static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        } else {
                /* Use a different default for LE-only devices */
                memset(events, 0, sizeof(events));
                events[0] |= 0x10; /* Disconnection Complete */
                events[0] |= 0x80; /* Encryption Change */
                events[1] |= 0x08; /* Read Remote Version Information Complete */
                events[1] |= 0x20; /* Command Complete */
                events[1] |= 0x40; /* Command Status */
                events[1] |= 0x80; /* Hardware Error */
                events[2] |= 0x04; /* Number of Completed Packets */
                events[3] |= 0x02; /* Data Buffer Overflow */
                events[5] |= 0x80; /* Encryption Key Refresh Complete */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

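/* Note (added for clarity): events[] is sent as the 8-byte little-endian
 * bitfield of the Set Event Mask command, so events[n] bit b corresponds
 * to mask bit n * 8 + b. For example, events[4] |= 0x02 above sets bit
 * 33, the Inquiry Result with RSSI event.
 */
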
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);
        else
                clear_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        /* AVM Berlin (31), aka "BlueFRITZ!", doesn't support the read
         * local supported commands HCI command.
         */
        if (hdev->manufacturer != 31 && hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        /* LE-only devices do not support explicit enablement */
        if (!lmp_bredr_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_set_event_mask_page_2(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 events[8] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };

        /* If Connectionless Slave Broadcast master role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x01) {
                events[1] |= 0x40; /* Triggered Clock Capture */
                events[1] |= 0x80; /* Synchronization Train Complete */
                events[2] |= 0x10; /* Slave Page Response Timeout */
                events[2] |= 0x20; /* CSB Channel Map Change */
        }

        /* If Connectionless Slave Broadcast slave role is supported
         * enable all necessary events for it.
         */
        if (hdev->features[2][0] & 0x02) {
                events[2] |= 0x01; /* Synchronization Train Received */
                events[2] |= 0x02; /* CSB Receive */
                events[2] |= 0x04; /* CSB Timeout */
                events[2] |= 0x08; /* Truncated Page Complete */
        }

        hci_req_add(req, HCI_OP_SET_EVENT_MASK_PAGE_2, sizeof(events), events);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        u8 p;

        /* Some Broadcom based Bluetooth controllers do not support the
         * Delete Stored Link Key command. They are clearly indicating its
         * absence in the bit mask of supported commands.
         *
         * Check the supported commands and only if the command is marked
         * as supported send it. If not supported, assume that the controller
         * does not have actual support for stored link keys, which makes this
         * command redundant anyway.
         */
        if (hdev->commands[6] & 0x80) {
                struct hci_cp_delete_stored_link_key cp;

                bacpy(&cp.bdaddr, BDADDR_ANY);
                cp.delete_all = 0x01;
                hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY,
                            sizeof(cp), &cp);
        }

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev))
                hci_set_le_support(req);

        /* Read features beyond page 1 if available */
        for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = p;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }
}

static void hci_init4_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        /* Set event mask page 2 if the HCI command for it is supported */
        if (hdev->commands[22] & 0x04)
                hci_set_event_mask_page_2(req);

        /* Check for Synchronization Train support */
        if (hdev->features[2][0] & 0x04)
                hci_req_add(req, HCI_OP_READ_SYNC_TRAIN_PARAMS, 0, NULL);
}

static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The HCI_BREDR device type covers single-mode LE, BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        err = __hci_req_sync(hdev, hci_init4_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* Only create debugfs entries during the initial setup
         * phase and not every time the controller gets powered on.
         */
        if (!test_bit(HCI_SETUP, &hdev->dev_flags))
                return 0;

        if (lmp_bredr_capable(hdev)) {
                debugfs_create_file("inquiry_cache", 0444, hdev->debugfs,
                                    hdev, &inquiry_cache_fops);
        }

        return 0;
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

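/* Example (added, illustrative): every successful hci_dev_get() must be
 * balanced by hci_dev_put(), as hci_inquiry() and hci_dev_open() below
 * demonstrate:
 *
 *      hdev = hci_dev_get(dev_id);
 *      if (!hdev)
 *              return -ENODEV;
 *
 *      ... use hdev ...
 *
 *      hci_dev_put(hdev);
 */
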
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

void hci_inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
                err = -EBUSY;
                goto done;
        }

        if (hdev->dev_type != HCI_BREDR) {
                err = -EOPNOTSUPP;
                goto done;
        }

        if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
                err = -EOPNOTSUPP;
                goto done;
        }

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                hci_inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses we will use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

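/* Example (added, illustrative): userspace reaches hci_inquiry() through
 * the HCIINQUIRY ioctl on a raw HCI socket, passing a request header
 * followed by room for the responses. This is roughly the convention the
 * BlueZ library helper uses; the exact layout shown is an assumption,
 * not something defined in this file:
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[255];
 *      } buf = { .ir = { .dev_id = 0, .flags = IREQ_CACHE_FLUSH,
 *                        .lap = { 0x33, 0x8b, 0x9e },  // GIAC
 *                        .length = 8, .num_rsp = 255 } };
 *
 *      ioctl(sock_fd, HCIINQUIRY, &buf);
 */
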
static int hci_dev_do_open(struct hci_dev *hdev)
{
        int ret = 0;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                /* Check for rfkill but allow the HCI setup stage to
                 * proceed (which in itself doesn't cause any RF activity).
                 */
                if (test_bit(HCI_RFKILLED, &hdev->dev_flags)) {
                        ret = -ERFKILL;
                        goto done;
                }

                /* Check for a valid public address or a configured static
                 * random address, but let the HCI setup proceed to be able
                 * to determine if there is a public address or not.
                 *
                 * This check is only valid for BR/EDR controllers
                 * since AMP controllers do not have an address.
                 */
                if (hdev->dev_type == HCI_BREDR &&
                    !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
                    !bacmp(&hdev->static_addr, BDADDR_ANY)) {
                        ret = -EADDRNOTAVAIL;
                        goto done;
                }
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        atomic_set(&hdev->cmd_cnt, 1);
        set_bit(HCI_INIT, &hdev->flags);

        if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
                ret = hdev->setup(hdev);

        if (!ret) {
                if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                        set_bit(HCI_RAW, &hdev->flags);

                if (!test_bit(HCI_RAW, &hdev->flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
                        ret = __hci_init(hdev);
        }

        clear_bit(HCI_INIT, &hdev->flags);

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    !test_bit(HCI_USER_CHANNEL, &hdev->dev_flags) &&
                    hdev->dev_type == HCI_BREDR) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        return ret;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        /* We need to ensure that no other power on/off work is pending
         * before proceeding to call hci_dev_do_open. This is
         * particularly important if the setup procedure has not yet
         * completed.
         */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        /* After this call it is guaranteed that the setup procedure
         * has finished. This means that error conditions like RFKILL
         * or no valid public or static random address apply.
         */
        flush_workqueue(hdev->req_workqueue);

        err = hci_dev_do_open(hdev);

        hci_dev_put(hdev);

        return err;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
		clear_bit(HCI_LIMITED_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    !test_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		if (hdev->dev_type == HCI_BREDR) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 0);
			hci_dev_unlock(hdev);
		}
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = AMP_STATUS_POWERED_DOWN;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

done:
	hci_dev_put(hdev);
	return err;
}

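/* Reset a running controller (reached via the HCIDEVRESET ioctl):
 * drop all queued packets, flush the inquiry cache and connection
 * hash, and issue an HCI_Reset unless the device is in raw mode.
 * Fails with -ENETDOWN if the device is not up and -EBUSY if it is
 * bound to a user channel.
 */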
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags)) {
		ret = -ENETDOWN;
		goto done;
	}

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	hci_inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		ret = -EBUSY;
		goto done;
	}

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

done:
	hci_dev_put(hdev);
	return ret;
}

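/* Dispatcher for the simple HCI device ioctls (HCISETAUTH, HCISETSCAN,
 * HCISETPTYPE, ...). Commands that change controller state run as
 * synchronous HCI requests; the MTU and link-mode settings only update
 * fields in struct hci_dev. Only available for powered BR/EDR devices
 * that are not bound to a user channel.
 */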
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
		err = -EBUSY;
		goto done;
	}

	if (hdev->dev_type != HCI_BREDR) {
		err = -EOPNOTSUPP;
		goto done;
	}

	if (!test_bit(HCI_BREDR_ENABLED, &hdev->dev_flags)) {
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

done:
	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | ((hdev->dev_type & 0x03) << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

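/* rfkill hook: blocking the switch takes a running (non-setup) device
 * down immediately; unblocking only clears the flag, leaving it to
 * user space or the power-on work to bring the device back up.
 */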
static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (test_bit(HCI_USER_CHANNEL, &hdev->dev_flags))
		return -EBUSY;

	if (blocked) {
		set_bit(HCI_RFKILLED, &hdev->dev_flags);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags))
			hci_dev_do_close(hdev);
	} else {
		clear_bit(HCI_RFKILLED, &hdev->dev_flags);
	}

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
	int err;

	BT_DBG("%s", hdev->name);

	err = hci_dev_do_open(hdev);
	if (err < 0) {
		mgmt_set_powered_failed(hdev, err);
		return;
	}

	/* During the HCI setup phase, a few error conditions are
	 * ignored and they need to be checked now. If they are still
	 * valid, it is important to turn the device back off.
	 */
	if (test_bit(HCI_RFKILLED, &hdev->dev_flags) ||
	    (hdev->dev_type == HCI_BREDR &&
	     !bacmp(&hdev->bdaddr, BDADDR_ANY) &&
	     !bacmp(&hdev->static_addr, BDADDR_ANY))) {
		clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
		hci_dev_do_close(hdev);
	} else if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);
	}

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	mgmt_discoverable_timeout(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

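/* Decide whether a newly created BR/EDR link key should survive the
 * current connection. Legacy (pre-SSP) keys are always stored, debug
 * keys never are, and for Secure Simple Pairing the decision depends
 * on the authentication requirements both sides declared: any form of
 * dedicated or general bonding makes the key persistent.
 */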
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

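/* Store (or update) a BR/EDR link key for bdaddr. Buggy controllers
 * that report a changed combination key for legacy pairing without a
 * previous key are papered over by treating the key as a plain
 * combination key. For new keys the persistence decision is forwarded
 * to the management interface via mgmt_new_link_key().
 */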
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

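/* Store (or update) an SMP long term key. Only STKs and LTKs are
 * accepted; the EDIV/Rand pair identifies the key during encryption
 * requests. New LTKs (but not STKs) are announced to the management
 * interface.
 */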
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

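/* Completion handlers for the LE scan disable request below. Once
 * scanning has been turned off, pure LE discovery simply stops, while
 * interleaved discovery continues with a classic inquiry using the
 * general inquiry access code for DISCOV_INTERLEAVED_INQUIRY_LEN.
 */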
static void inquiry_complete(struct hci_dev *hdev, u8 status)
{
	if (status) {
		BT_ERR("Failed to start inquiry: status %d", status);

		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		return;
	}
}

static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_request req;
	struct hci_cp_inquiry cp;
	int err;

	if (status) {
		BT_ERR("Failed to disable LE scanning: status %d", status);
		return;
	}

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_LE:
		hci_dev_lock(hdev);
		hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		hci_dev_unlock(hdev);
		break;

	case DISCOV_TYPE_INTERLEAVED:
		hci_req_init(&req, hdev);

		memset(&cp, 0, sizeof(cp));
		memcpy(&cp.lap, lap, sizeof(cp.lap));
		cp.length = DISCOV_INTERLEAVED_INQUIRY_LEN;
		hci_req_add(&req, HCI_OP_INQUIRY, sizeof(cp), &cp);

		hci_dev_lock(hdev);

		hci_inquiry_cache_flush(hdev);

		err = hci_req_run(&req, inquiry_complete);
		if (err) {
			BT_ERR("Inquiry request failed: err %d", err);
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		}

		hci_dev_unlock(hdev);
		break;
	}
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;
	struct hci_request req;
	int err;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	err = hci_req_run(&req, le_scan_disable_work_complete);
	if (err)
		BT_ERR("Disable LE scanning request failed: err %d", err);
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->num_iac = 0x01;		/* One IAC support is mandatory */
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	hdev->le_scan_interval = 0x0060;
	hdev->le_scan_window = 0x0030;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1, hdev->name);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill))
		set_bit(HCI_RFKILLED, &hdev->dev_flags);

	set_bit(HCI_SETUP, &hdev->dev_flags);
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	if (hdev->dev_type == HCI_BREDR) {
		/* Assume BR/EDR support until proven otherwise (such as
		 * through reading supported features during init).
		 */
		set_bit(HCI_BREDR_ENABLED, &hdev->dev_flags);
	}

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

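/* Incrementally reassemble one HCI packet from a driver byte stream.
 * A per-index skb in hdev->reassembly[] accumulates data until the
 * header, and then the full payload length announced by that header,
 * has arrived; the completed frame is handed to hci_recv_frame().
 * Returns the number of input bytes not yet consumed, or a negative
 * error.
 */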
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(hdev, skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

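/* Hand one outgoing frame to the driver. A copy goes to the monitor
 * socket (and, in promiscuous mode, to the raw HCI sockets) before
 * the skb is orphaned and passed to the driver's send() callback.
 */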
static void hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	if (hdev->send(hdev, skb) < 0)
		BT_ERR("%s sending frame failed", hdev->name);
}

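/* Asynchronous HCI request framework: hci_req_init() prepares a local
 * command queue, hci_req_add()/hci_req_add_ev() append commands to it,
 * and hci_req_run() splices the batch onto hdev->cmd_q with a single
 * completion callback attached to the last command. Typical usage, as
 * in le_scan_disable_work() above:
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
 *	err = hci_req_run(&req, le_scan_disable_work_complete);
 */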
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002736static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002737 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738{
2739 int len = HCI_COMMAND_HDR_SIZE + plen;
2740 struct hci_command_hdr *hdr;
2741 struct sk_buff *skb;
2742
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002744 if (!skb)
2745 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746
2747 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002748 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 hdr->plen = plen;
2750
2751 if (plen)
2752 memcpy(skb_put(skb, plen), param, plen);
2753
2754 BT_DBG("skb len %d", skb->len);
2755
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002756 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002757
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002758 return skb;
2759}
2760
2761/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002762int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2763 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002764{
2765 struct sk_buff *skb;
2766
2767 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2768
2769 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2770 if (!skb) {
2771 BT_ERR("%s no memory for command", hdev->name);
2772 return -ENOMEM;
2773 }
2774
Johan Hedberg11714b32013-03-05 20:37:47 +02002775 /* Stand-alone HCI commands must be flagged as
2776 * single-command requests.
2777 */
2778 bt_cb(skb)->req.start = true;
2779
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002781 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782
2783 return 0;
2784}
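
/* Illustrative sketch, not part of the original file: issuing a
 * stand-alone command outside any request context. HCI_OP_RESET with a
 * zero-length parameter block is used purely as a placeholder opcode.
 */
#if 0
static int sample_send_reset(struct hci_dev *hdev)
{
	/* Queued on hdev->cmd_q with req.start flagged, as above */
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
#endif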
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785
Johan Hedberg71c76a12013-03-05 20:37:46 +02002786/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002787void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2788 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002789{
2790 struct hci_dev *hdev = req->hdev;
2791 struct sk_buff *skb;
2792
2793 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2794
Andre Guedes34739c12013-03-08 11:20:18 -03002795 /* If an error occurred during request building, there is no point in
2796 * queueing the HCI command. We can simply return.
2797 */
2798 if (req->err)
2799 return;
2800
Johan Hedberg71c76a12013-03-05 20:37:46 +02002801 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2802 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002803 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2804 hdev->name, opcode);
2805 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002806 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002807 }
2808
2809 if (skb_queue_empty(&req->cmd_q))
2810 bt_cb(skb)->req.start = true;
2811
Johan Hedberg02350a72013-04-03 21:50:29 +03002812 bt_cb(skb)->req.event = event;
2813
Johan Hedberg71c76a12013-03-05 20:37:46 +02002814 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002815}
2816
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002817void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2818 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002819{
2820 hci_req_add_ev(req, opcode, plen, param, 0);
2821}
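
/* Illustrative sketch, not part of the original file: building and
 * running an asynchronous request around the helpers above. The
 * completion callback signature matches how hci_req_cmd_complete()
 * below invokes it; HCI_OP_RESET again stands in for a real opcode.
 */
#if 0
static void sample_req_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int sample_run_reset_req(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_RESET, 0, NULL);

	/* Returns -ENODATA for an empty queue, req.err on build error */
	return hci_req_run(&req, sample_req_complete);
}
#endif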
2822
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002824void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825{
2826 struct hci_command_hdr *hdr;
2827
2828 if (!hdev->sent_cmd)
2829 return NULL;
2830
2831 hdr = (void *) hdev->sent_cmd->data;
2832
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002833 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834 return NULL;
2835
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002836 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837
2838 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2839}
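
/* Illustrative sketch, not part of the original file: a completion
 * handler recovering the parameters of the command it completes. The
 * returned pointer sits just past the command header, so a real caller
 * would cast it to the matching hci_cp_* parameter struct.
 */
#if 0
static void sample_inspect_sent(struct hci_dev *hdev)
{
	void *sent = hci_sent_cmd_data(hdev, HCI_OP_RESET);

	if (!sent)
		return;	/* last command sent was not a reset */
}
#endif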
2840
2841/* Send ACL data */
2842static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2843{
2844 struct hci_acl_hdr *hdr;
2845 int len = skb->len;
2846
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002847 skb_push(skb, HCI_ACL_HDR_SIZE);
2848 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002849 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002850 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2851 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852}
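
/* Illustrative note, not part of the original file: hci_handle_pack()
 * is assumed to fold the 12-bit connection handle and the 4 packet
 * boundary/broadcast flag bits into one 16-bit field, roughly
 * (handle & 0x0fff) | (flags << 12), which hci_flags()/hci_handle()
 * in the RX path below split apart again.
 */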
2853
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002854static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002855 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002857 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 struct hci_dev *hdev = conn->hdev;
2859 struct sk_buff *list;
2860
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002861 skb->len = skb_headlen(skb);
2862 skb->data_len = 0;
2863
2864 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002865
2866 switch (hdev->dev_type) {
2867 case HCI_BREDR:
2868 hci_add_acl_hdr(skb, conn->handle, flags);
2869 break;
2870 case HCI_AMP:
2871 hci_add_acl_hdr(skb, chan->handle, flags);
2872 break;
2873 default:
2874 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2875 return;
2876 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002877
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002878 list = skb_shinfo(skb)->frag_list;
2879 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002880 /* Non-fragmented */
2881 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2882
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002883 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002884 } else {
2885 /* Fragmented */
2886 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2887
2888 skb_shinfo(skb)->frag_list = NULL;
2889
2890 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002891 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002893 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002894
2895 flags &= ~ACL_START;
2896 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897 do {
2898 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002899
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002900 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002901 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902
2903 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2904
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002905 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002906 } while (list);
2907
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002908 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002909 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002910}
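
/* Illustrative note, not part of the original file: for a fragmented
 * skb the head keeps the caller's flags (typically including
 * ACL_START) while every fragment on frag_list is re-flagged ACL_CONT
 * above, letting the controller reassemble the PDU in queue order.
 */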
2911
2912void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2913{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002914 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002915
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002916 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002917
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002918 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002919
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002920 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922
2923/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002924void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925{
2926 struct hci_dev *hdev = conn->hdev;
2927 struct hci_sco_hdr hdr;
2928
2929 BT_DBG("%s len %d", hdev->name, skb->len);
2930
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002931 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 hdr.dlen = skb->len;
2933
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002934 skb_push(skb, HCI_SCO_HDR_SIZE);
2935 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002936 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002937
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002938 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002939
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002941 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943
2944/* ---- HCI TX task (outgoing data) ---- */
2945
2946/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002947static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2948 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949{
2950 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002951 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002952 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002953
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002954 /* We don't have to lock the device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002956
2957 rcu_read_lock();
2958
2959 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002960 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002961 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002962
2963 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2964 continue;
2965
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 num++;
2967
2968 if (c->sent < min) {
2969 min = c->sent;
2970 conn = c;
2971 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002972
2973 if (hci_conn_num(hdev, type) == num)
2974 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002975 }
2976
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002977 rcu_read_unlock();
2978
Linus Torvalds1da177e2005-04-16 15:20:36 -07002979 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002980 int cnt, q;
2981
2982 switch (conn->type) {
2983 case ACL_LINK:
2984 cnt = hdev->acl_cnt;
2985 break;
2986 case SCO_LINK:
2987 case ESCO_LINK:
2988 cnt = hdev->sco_cnt;
2989 break;
2990 case LE_LINK:
2991 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2992 break;
2993 default:
2994 cnt = 0;
2995 BT_ERR("Unknown link type");
2996 }
2997
2998 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002999 *quote = q ? q : 1;
3000 } else
3001 *quote = 0;
3002
3003 BT_DBG("conn %p quote %d", conn, *quote);
3004 return conn;
3005}
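
/* Illustrative note, not part of the original file: the quote divides
 * the free controller buffers evenly over busy links of one type. For
 * example, with hdev->acl_cnt == 10 and num == 4 ACL connections each
 * round may send q = 10 / 4 = 2 frames per link; q == 0 rounds up to 1
 * so the least-used connection always makes progress.
 */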
3006
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003007static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008{
3009 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02003010 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003011
Ville Tervobae1f5d92011-02-10 22:38:53 -03003012 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003013
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003014 rcu_read_lock();
3015
Linus Torvalds1da177e2005-04-16 15:20:36 -07003016 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003017 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03003018 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03003019 BT_ERR("%s killing stalled connection %pMR",
3020 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03003021 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022 }
3023 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003024
3025 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026}
3027
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003028static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
3029 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003030{
3031 struct hci_conn_hash *h = &hdev->conn_hash;
3032 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02003033 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003034 struct hci_conn *conn;
3035 int cnt, q, conn_num = 0;
3036
3037 BT_DBG("%s", hdev->name);
3038
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003039 rcu_read_lock();
3040
3041 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003042 struct hci_chan *tmp;
3043
3044 if (conn->type != type)
3045 continue;
3046
3047 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3048 continue;
3049
3050 conn_num++;
3051
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003052 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003053 struct sk_buff *skb;
3054
3055 if (skb_queue_empty(&tmp->data_q))
3056 continue;
3057
3058 skb = skb_peek(&tmp->data_q);
3059 if (skb->priority < cur_prio)
3060 continue;
3061
3062 if (skb->priority > cur_prio) {
3063 num = 0;
3064 min = ~0;
3065 cur_prio = skb->priority;
3066 }
3067
3068 num++;
3069
3070 if (conn->sent < min) {
3071 min = conn->sent;
3072 chan = tmp;
3073 }
3074 }
3075
3076 if (hci_conn_num(hdev, type) == conn_num)
3077 break;
3078 }
3079
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003080 rcu_read_unlock();
3081
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003082 if (!chan)
3083 return NULL;
3084
3085 switch (chan->conn->type) {
3086 case ACL_LINK:
3087 cnt = hdev->acl_cnt;
3088 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003089 case AMP_LINK:
3090 cnt = hdev->block_cnt;
3091 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003092 case SCO_LINK:
3093 case ESCO_LINK:
3094 cnt = hdev->sco_cnt;
3095 break;
3096 case LE_LINK:
3097 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
3098 break;
3099 default:
3100 cnt = 0;
3101 BT_ERR("Unknown link type");
3102 }
3103
3104 q = cnt / num;
3105 *quote = q ? q : 1;
3106 BT_DBG("chan %p quote %d", chan, *quote);
3107 return chan;
3108}
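
/* Illustrative note, not part of the original file: channel selection
 * is two-level. The highest skb->priority seen at the head of any
 * channel queue wins, and among channels at that priority the one on
 * the connection with the fewest in-flight frames (conn->sent) is
 * chosen, giving priority order first and fairness second.
 */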
3109
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003110static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
3111{
3112 struct hci_conn_hash *h = &hdev->conn_hash;
3113 struct hci_conn *conn;
3114 int num = 0;
3115
3116 BT_DBG("%s", hdev->name);
3117
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003118 rcu_read_lock();
3119
3120 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003121 struct hci_chan *chan;
3122
3123 if (conn->type != type)
3124 continue;
3125
3126 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
3127 continue;
3128
3129 num++;
3130
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02003131 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003132 struct sk_buff *skb;
3133
3134 if (chan->sent) {
3135 chan->sent = 0;
3136 continue;
3137 }
3138
3139 if (skb_queue_empty(&chan->data_q))
3140 continue;
3141
3142 skb = skb_peek(&chan->data_q);
3143 if (skb->priority >= HCI_PRIO_MAX - 1)
3144 continue;
3145
3146 skb->priority = HCI_PRIO_MAX - 1;
3147
3148 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003149 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003150 }
3151
3152 if (hci_conn_num(hdev, type) == num)
3153 break;
3154 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02003155
3156 rcu_read_unlock();
3157
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003158}
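
/* Illustrative note, not part of the original file: channels that sent
 * nothing in the last round (chan->sent == 0) but still hold queued
 * data are promoted to HCI_PRIO_MAX - 1 above, so lower-priority
 * traffic cannot be starved indefinitely by a higher-priority stream.
 */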
3159
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003160static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
3161{
3162 /* Calculate count of blocks used by this packet */
3163 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
3164}
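
/* Illustrative note, not part of the original file: assuming
 * HCI_ACL_HDR_SIZE == 4 and hdev->block_len == 339, an skb of 1021
 * bytes carries 1017 bytes of payload and __get_blocks() charges
 * DIV_ROUND_UP(1017, 339) == 3 blocks against hdev->block_cnt.
 */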
3165
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003166static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003168 if (!test_bit(HCI_RAW, &hdev->flags)) {
3169 /* ACL tx timeout must be longer than maximum
3170 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003171 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003172 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003173 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003175}
Linus Torvalds1da177e2005-04-16 15:20:36 -07003176
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003177static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003178{
3179 unsigned int cnt = hdev->acl_cnt;
3180 struct hci_chan *chan;
3181 struct sk_buff *skb;
3182 int quote;
3183
3184 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003185
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003186 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003187 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003188 u32 priority = (skb_peek(&chan->data_q))->priority;
3189 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003190 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003191 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003192
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003193 /* Stop if priority has changed */
3194 if (skb->priority < priority)
3195 break;
3196
3197 skb = skb_dequeue(&chan->data_q);
3198
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003199 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03003200 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003201
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003202 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003203 hdev->acl_last_tx = jiffies;
3204
3205 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003206 chan->sent++;
3207 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208 }
3209 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003210
3211 if (cnt != hdev->acl_cnt)
3212 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003213}
3214
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003215static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003216{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003217 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003218 struct hci_chan *chan;
3219 struct sk_buff *skb;
3220 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003221 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003222
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02003223 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003224
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003225 BT_DBG("%s", hdev->name);
3226
3227 if (hdev->dev_type == HCI_AMP)
3228 type = AMP_LINK;
3229 else
3230 type = ACL_LINK;
3231
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003232 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003233 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003234 u32 priority = (skb_peek(&chan->data_q))->priority;
3235 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
3236 int blocks;
3237
3238 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003239 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003240
3241 /* Stop if priority has changed */
3242 if (skb->priority < priority)
3243 break;
3244
3245 skb = skb_dequeue(&chan->data_q);
3246
3247 blocks = __get_blocks(hdev, skb);
3248 if (blocks > hdev->block_cnt)
3249 return;
3250
3251 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003252 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003253
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003254 hci_send_frame(hdev, skb);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003255 hdev->acl_last_tx = jiffies;
3256
3257 hdev->block_cnt -= blocks;
3258 quote -= blocks;
3259
3260 chan->sent += blocks;
3261 chan->conn->sent += blocks;
3262 }
3263 }
3264
3265 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003266 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003267}
3268
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003269static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003270{
3271 BT_DBG("%s", hdev->name);
3272
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003273 /* No ACL link over BR/EDR controller */
3274 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3275 return;
3276
3277 /* No AMP link over AMP controller */
3278 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003279 return;
3280
3281 switch (hdev->flow_ctl_mode) {
3282 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3283 hci_sched_acl_pkt(hdev);
3284 break;
3285
3286 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3287 hci_sched_acl_blk(hdev);
3288 break;
3289 }
3290}
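
/* Illustrative note, not part of the original file: packet-based flow
 * control accounts one hdev->acl_cnt unit per frame, while block-based
 * (AMP-style) flow control charges __get_blocks() worth of
 * hdev->block_cnt per frame; hdev->flow_ctl_mode is assumed to be set
 * from the controller's capabilities during init.
 */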
3291
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003293static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003294{
3295 struct hci_conn *conn;
3296 struct sk_buff *skb;
3297 int quote;
3298
3299 BT_DBG("%s", hdev->name);
3300
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003301 if (!hci_conn_num(hdev, SCO_LINK))
3302 return;
3303
Linus Torvalds1da177e2005-04-16 15:20:36 -07003304 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3305 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3306 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003307 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003308
3309 conn->sent++;
3310 if (conn->sent == ~0)
3311 conn->sent = 0;
3312 }
3313 }
3314}
3315
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003316static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003317{
3318 struct hci_conn *conn;
3319 struct sk_buff *skb;
3320 int quote;
3321
3322 BT_DBG("%s", hdev->name);
3323
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003324 if (!hci_conn_num(hdev, ESCO_LINK))
3325 return;
3326
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003327 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3328 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003329 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3330 BT_DBG("skb %p len %d", skb, skb->len);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003331 hci_send_frame(hdev, skb);
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003332
3333 conn->sent++;
3334 if (conn->sent == ~0)
3335 conn->sent = 0;
3336 }
3337 }
3338}
3339
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003340static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003341{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003342 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003343 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003344 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003345
3346 BT_DBG("%s", hdev->name);
3347
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003348 if (!hci_conn_num(hdev, LE_LINK))
3349 return;
3350
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003351 if (!test_bit(HCI_RAW, &hdev->flags)) {
3352 /* LE tx timeout must be longer than maximum
3353 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003354 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003355 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003356 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003357 }
3358
3359 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003360 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003361 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003362 u32 priority = (skb_peek(&chan->data_q))->priority;
3363 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003364 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003365 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003366
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003367 /* Stop if priority has changed */
3368 if (skb->priority < priority)
3369 break;
3370
3371 skb = skb_dequeue(&chan->data_q);
3372
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003373 hci_send_frame(hdev, skb);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003374 hdev->le_last_tx = jiffies;
3375
3376 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003377 chan->sent++;
3378 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003379 }
3380 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003381
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003382 if (hdev->le_pkts)
3383 hdev->le_cnt = cnt;
3384 else
3385 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003386
3387 if (cnt != tmp)
3388 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003389}
3390
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003391static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003393 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 struct sk_buff *skb;
3395
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003396 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003397 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398
Marcel Holtmann52de5992013-09-03 18:08:38 -07003399 if (!test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
3400 /* Schedule queues and send stuff to HCI driver */
3401 hci_sched_acl(hdev);
3402 hci_sched_sco(hdev);
3403 hci_sched_esco(hdev);
3404 hci_sched_le(hdev);
3405 }
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003406
Linus Torvalds1da177e2005-04-16 15:20:36 -07003407 /* Send next queued raw (unknown type) packet */
3408 while ((skb = skb_dequeue(&hdev->raw_q)))
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003409 hci_send_frame(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003410}
3411
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003412/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003413
3414/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003415static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416{
3417 struct hci_acl_hdr *hdr = (void *) skb->data;
3418 struct hci_conn *conn;
3419 __u16 handle, flags;
3420
3421 skb_pull(skb, HCI_ACL_HDR_SIZE);
3422
3423 handle = __le16_to_cpu(hdr->handle);
3424 flags = hci_flags(handle);
3425 handle = hci_handle(handle);
3426
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003427 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003428 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003429
3430 hdev->stat.acl_rx++;
3431
3432 hci_dev_lock(hdev);
3433 conn = hci_conn_hash_lookup_handle(hdev, handle);
3434 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003435
Linus Torvalds1da177e2005-04-16 15:20:36 -07003436 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003437 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003438
Linus Torvalds1da177e2005-04-16 15:20:36 -07003439 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003440 l2cap_recv_acldata(conn, skb, flags);
3441 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003442 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003443 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003444 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003445 }
3446
3447 kfree_skb(skb);
3448}
3449
3450/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003451static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003452{
3453 struct hci_sco_hdr *hdr = (void *) skb->data;
3454 struct hci_conn *conn;
3455 __u16 handle;
3456
3457 skb_pull(skb, HCI_SCO_HDR_SIZE);
3458
3459 handle = __le16_to_cpu(hdr->handle);
3460
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003461 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003462
3463 hdev->stat.sco_rx++;
3464
3465 hci_dev_lock(hdev);
3466 conn = hci_conn_hash_lookup_handle(hdev, handle);
3467 hci_dev_unlock(hdev);
3468
3469 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003470 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003471 sco_recv_scodata(conn, skb);
3472 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003473 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003474 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003475 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003476 }
3477
3478 kfree_skb(skb);
3479}
3480
Johan Hedberg9238f362013-03-05 20:37:48 +02003481static bool hci_req_is_complete(struct hci_dev *hdev)
3482{
3483 struct sk_buff *skb;
3484
3485 skb = skb_peek(&hdev->cmd_q);
3486 if (!skb)
3487 return true;
3488
3489 return bt_cb(skb)->req.start;
3490}
3491
Johan Hedberg42c6b122013-03-05 20:37:49 +02003492static void hci_resend_last(struct hci_dev *hdev)
3493{
3494 struct hci_command_hdr *sent;
3495 struct sk_buff *skb;
3496 u16 opcode;
3497
3498 if (!hdev->sent_cmd)
3499 return;
3500
3501 sent = (void *) hdev->sent_cmd->data;
3502 opcode = __le16_to_cpu(sent->opcode);
3503 if (opcode == HCI_OP_RESET)
3504 return;
3505
3506 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3507 if (!skb)
3508 return;
3509
3510 skb_queue_head(&hdev->cmd_q, skb);
3511 queue_work(hdev->workqueue, &hdev->cmd_work);
3512}
3513
Johan Hedberg9238f362013-03-05 20:37:48 +02003514void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3515{
3516 hci_req_complete_t req_complete = NULL;
3517 struct sk_buff *skb;
3518 unsigned long flags;
3519
3520 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3521
Johan Hedberg42c6b122013-03-05 20:37:49 +02003522 /* If the completed command doesn't match the last one that was
3523 * sent we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003524 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003525 if (!hci_sent_cmd_data(hdev, opcode)) {
3526 /* Some CSR based controllers generate a spontaneous
3527 * reset complete event during init and any pending
3528 * command will never be completed. In such a case we
3529 * need to resend whatever was the last sent
3530 * command.
3531 */
3532 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3533 hci_resend_last(hdev);
3534
Johan Hedberg9238f362013-03-05 20:37:48 +02003535 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003536 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003537
3538 /* If the command succeeded and there's still more commands in
3539 * this request the request is not yet complete.
3540 */
3541 if (!status && !hci_req_is_complete(hdev))
3542 return;
3543
3544 /* If this was the last command in a request, the complete
3545 * callback would be found in hdev->sent_cmd instead of the
3546 * command queue (hdev->cmd_q).
3547 */
3548 if (hdev->sent_cmd) {
3549 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003550
3551 if (req_complete) {
3552 /* We must set the complete callback to NULL to
3553 * avoid calling the callback more than once if
3554 * this function gets called again.
3555 */
3556 bt_cb(hdev->sent_cmd)->req.complete = NULL;
3557
Johan Hedberg9238f362013-03-05 20:37:48 +02003558 goto call_complete;
Johan Hedberg53e21fb2013-07-27 14:11:14 -05003559 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003560 }
3561
3562 /* Remove all pending commands belonging to this request */
3563 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3564 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3565 if (bt_cb(skb)->req.start) {
3566 __skb_queue_head(&hdev->cmd_q, skb);
3567 break;
3568 }
3569
3570 req_complete = bt_cb(skb)->req.complete;
3571 kfree_skb(skb);
3572 }
3573 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3574
3575call_complete:
3576 if (req_complete)
3577 req_complete(hdev, status);
3578}
3579
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003580static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003581{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003582 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003583 struct sk_buff *skb;
3584
3585 BT_DBG("%s", hdev->name);
3586
Linus Torvalds1da177e2005-04-16 15:20:36 -07003587 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003588 /* Send copy to monitor */
3589 hci_send_to_monitor(hdev, skb);
3590
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591 if (atomic_read(&hdev->promisc)) {
3592 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003593 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594 }
3595
Marcel Holtmann0736cfa2013-08-26 21:40:51 -07003596 if (test_bit(HCI_RAW, &hdev->flags) ||
3597 test_bit(HCI_USER_CHANNEL, &hdev->dev_flags)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598 kfree_skb(skb);
3599 continue;
3600 }
3601
3602 if (test_bit(HCI_INIT, &hdev->flags)) {
3603 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003604 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003605 case HCI_ACLDATA_PKT:
3606 case HCI_SCODATA_PKT:
3607 kfree_skb(skb);
3608 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003609 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003610 }
3611
3612 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003613 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003614 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003615 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003616 hci_event_packet(hdev, skb);
3617 break;
3618
3619 case HCI_ACLDATA_PKT:
3620 BT_DBG("%s ACL data packet", hdev->name);
3621 hci_acldata_packet(hdev, skb);
3622 break;
3623
3624 case HCI_SCODATA_PKT:
3625 BT_DBG("%s SCO data packet", hdev->name);
3626 hci_scodata_packet(hdev, skb);
3627 break;
3628
3629 default:
3630 kfree_skb(skb);
3631 break;
3632 }
3633 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003634}
3635
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003636static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003637{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003638 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003639 struct sk_buff *skb;
3640
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003641 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3642 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003643
Linus Torvalds1da177e2005-04-16 15:20:36 -07003644 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003645 if (atomic_read(&hdev->cmd_cnt)) {
3646 skb = skb_dequeue(&hdev->cmd_q);
3647 if (!skb)
3648 return;
3649
Wei Yongjun7585b972009-02-25 18:29:52 +08003650 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003651
Marcel Holtmanna675d7f2013-09-03 18:11:07 -07003652 hdev->sent_cmd = skb_clone(skb, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003653 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003654 atomic_dec(&hdev->cmd_cnt);
Marcel Holtmann57d17d72013-10-10 14:54:17 -07003655 hci_send_frame(hdev, skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003656 if (test_bit(HCI_RESET, &hdev->flags))
3657 del_timer(&hdev->cmd_timer);
3658 else
3659 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003660 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003661 } else {
3662 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003663 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003664 }
3665 }
3666}