/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

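/* Synchronous requests use a small handshake on hdev->req_status: the
 * submitter sets HCI_REQ_PEND and sleeps on hdev->req_wait_q, and the
 * completion (or cancel) path below moves the status to HCI_REQ_DONE or
 * HCI_REQ_CANCELED and wakes the waiter.
 */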
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

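/* Send a single HCI command and wait synchronously for its Command
 * Complete event. Returns the event skb, which the caller must free
 * with kfree_skb(), or an ERR_PTR. A minimal usage sketch (opcode and
 * timeout are just examples taken from elsewhere in this file):
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */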
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               void *param, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add(&req, opcode, plen, param);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

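/* hci_req_sync() is the locking wrapper around __hci_req_sync(): it
 * takes hci_req_lock() so that requests against one device are fully
 * serialized. The ioctl handlers below use it like this:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
 *			   HCI_INIT_TIMEOUT);
 */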
static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_request init_req;
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        hci_req_init(&init_req, hdev);

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                if (skb_queue_empty(&init_req.cmd_q))
                        bt_cb(skb)->req.start = true;

                skb_queue_tail(&init_req.cmd_q, skb);
        }
        skb_queue_purge(&hdev->driver_init);

        hci_req_run(&init_req, NULL);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

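/* Pick the inquiry result format for HCI_OP_WRITE_INQUIRY_MODE:
 * 0x02 = extended inquiry result, 0x01 = with RSSI, 0x00 = standard.
 * The manufacturer/revision checks below appear to cover controllers
 * that support RSSI results without advertising the corresponding LMP
 * feature bit.
 */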
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

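/* The HCI event mask is a 64-bit little-endian bitmap where bit i
 * (byte i / 8, bit i % 8 of events[]) enables event code i + 1; e.g.
 * events[4] |= 0x01 below enables event 0x21, Flow Specification
 * Complete.
 */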
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }
}

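/* Controller bring-up runs as up to three synchronous request stages:
 * init1 drains driver-queued commands and resets the controller, init2
 * reads the basic BR/EDR and LE capabilities, and init3 applies policy
 * that depends on what init2 discovered.
 */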
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* The HCI_BREDR device type covers single-mode LE, single-mode
         * BR/EDR and dual-mode BR/EDR/LE controllers. AMP controllers
         * only need the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

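/* Merge an inquiry result into the discovery cache. Every entry lives
 * on cache->all and, depending on its name_state, also on
 * cache->unknown or cache->resolve. The return value reports whether
 * the remote name is known after the update.
 */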
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses we use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

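/* Build the LE advertising data payload into ptr: an EIR_FLAGS field
 * when any flag is set, optionally the advertising TX power, then the
 * local name (tagged EIR_NAME_SHORT and truncated if it doesn't fit).
 * Returns the number of bytes written, at most HCI_MAX_AD_LENGTH.
 */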
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

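/* Power on a device: run the driver's open callback and then, unless
 * the device is marked HCI_RAW, the __hci_init() request stages. If
 * init fails, all queued work is flushed and the driver is closed
 * again.
 */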
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
           enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                ret = __hci_init(hdev);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

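/* Tear a device down in roughly the reverse order of bring-up: cancel
 * pending work, flush RX/TX work, flush the inquiry cache and the
 * connection hash, optionally send HCI_OP_RESET, then drop all queues
 * and call the driver's close callback.
 */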
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        cancel_delayed_work(&hdev->power_off);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
            test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        kfree_skb(hdev->recv_evt);
        hdev->recv_evt = NULL;

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;
        hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
            mgmt_valid_hdev(hdev)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Controller radio is available but is currently powered down */
        hdev->amp_status = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

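/* Handle the HCISET* ioctls. Settings that require controller
 * interaction (auth, encryption, scan enable, link policy) go through
 * hci_req_sync(); purely host-side settings (link mode, packet type,
 * MTUs) are written straight into hdev.
 */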
1343int hci_dev_cmd(unsigned int cmd, void __user *arg)
1344{
1345 struct hci_dev *hdev;
1346 struct hci_dev_req dr;
1347 int err = 0;
1348
1349 if (copy_from_user(&dr, arg, sizeof(dr)))
1350 return -EFAULT;
1351
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001352 hdev = hci_dev_get(dr.dev_id);
1353 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 return -ENODEV;
1355
1356 switch (cmd) {
1357 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001358 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1359 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 break;
1361
1362 case HCISETENCRYPT:
1363 if (!lmp_encrypt_capable(hdev)) {
1364 err = -EOPNOTSUPP;
1365 break;
1366 }
1367
1368 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1369 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001370 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1371 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372 if (err)
1373 break;
1374 }
1375
Johan Hedberg01178cd2013-03-05 20:37:41 +02001376 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1377 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001378 break;
1379
1380 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001381 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1382 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001383 break;
1384
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001385 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001386 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1387 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001388 break;
1389
1390 case HCISETLINKMODE:
1391 hdev->link_mode = ((__u16) dr.dev_opt) &
1392 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1393 break;
1394
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395 case HCISETPTYPE:
1396 hdev->pkt_type = (__u16) dr.dev_opt;
1397 break;
1398
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

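/* Copy up to dev_num device entries to userspace. dev_num is capped at
 * two pages' worth of hci_dev_req entries, and the buffer is shrunk to
 * the number of devices actually found before the copy back.
 */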
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

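/* Decide whether a link key should be stored persistently. Legacy keys
 * are always kept and debug keys never are; for SSP keys the decision
 * depends on the bonding requirements both sides announced during
 * pairing.
 */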
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side had no-bonding as a
	 * requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as a requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as a requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

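/* Store (or update) a link key for bdaddr. Userspace (mgmt) is only
 * notified when new_key is set, and conn->flush_key records whether a
 * non-persistent key should be discarded on disconnect.
 */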
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

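/* Store (or update) an SMP long term key. Both STKs and LTKs end up on
 * the same long_term_keys list; only proper LTKs are reported to the
 * mgmt interface.
 */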
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

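/* Remote out-of-band pairing data (presumably the hash and randomizer
 * values exchanged for Secure Simple Pairing) is kept in a simple list
 * per controller, keyed by the remote device address.
 */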
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

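/* LE scanning: the two request builders below queue the LE Set Scan
 * Parameters and LE Set Scan Enable commands; hci_do_le_scan() runs them
 * synchronously and then schedules le_scan_disable to stop the scan
 * after the requested timeout.
 */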
static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

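/* Reassemble a stream of driver bytes into complete HCI frames. The
 * skb's control block tracks how many bytes are still expected: first
 * the packet-type specific header and then, once the header has
 * arrived, the payload length it announces. A completed frame is handed
 * to hci_recv_frame() and the return value is the number of input bytes
 * left unconsumed.
 */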
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

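/* Pass an outgoing frame to the driver. A copy goes to the monitor
 * socket and, in promiscuous mode, to the raw HCI sockets before the
 * skb is orphaned and handed to the driver's send callback.
 */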
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

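/* Asynchronous HCI requests: commands are collected on a private queue
 * with hci_req_add() and spliced onto the device command queue by
 * hci_req_run(), which tags the last skb with the completion callback
 * for the request as a whole.
 */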
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

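/* Queue one ACL frame, fragmenting along the skb frag_list if needed.
 * Every fragment gets its own ACL header: the head keeps the caller's
 * flags (ACL_START) while the remaining fragments are re-flagged as
 * ACL_CONT, and all of them are queued atomically.
 */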
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

2778/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002779void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780{
2781 struct hci_dev *hdev = conn->hdev;
2782 struct hci_sco_hdr hdr;
2783
2784 BT_DBG("%s len %d", hdev->name, skb->len);
2785
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002786 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 hdr.dlen = skb->len;
2788
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002789 skb_push(skb, HCI_SCO_HDR_SIZE);
2790 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002791 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792
2793 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002794 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002795
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002797 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798}
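
/* Usage sketch, modelled loosely on how sco.c feeds voice frames into this
 * path: allocate a buffer with Bluetooth headroom, copy at most sco_mtu
 * bytes of payload and let hci_send_sco() prepend the SCO header. The helper
 * name is hypothetical.
 */
static inline int example_send_sco_frame(struct hci_conn *conn,
					 const void *data, int len)
{
	struct sk_buff *skb;

	if (len > conn->hdev->sco_mtu)	/* controller limit per SCO packet */
		return -EMSGSIZE;

	skb = bt_skb_alloc(len, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	hci_send_sco(conn, skb);	/* queues to conn->data_q, wakes tx_work */

	return 0;
}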

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock the device here. Connections are always
	 * added and removed with the TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
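
/* Quota example: with hdev->sco_cnt == 6 free controller buffers and three
 * SCO connections that all have queued data, the connection with the lowest
 * c->sent is returned and *quote becomes 6 / 3 == 2, so repeated scheduler
 * passes round-robin the available airtime across connections.
 */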

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
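
/* Selection example: if two ACL channels have frames queued at priority 5 and
 * one at priority 7, the priority-7 channel always wins; among channels at
 * the highest queued priority, the one whose connection has the fewest
 * outstanding packets (lowest conn->sent) is chosen, keeping the split fair.
 */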

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
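
/* Worked example for block-based (AMP) flow control: with
 * hdev->block_len == 64 and an skb of 4 header plus 300 payload bytes
 * (skb->len == 304), this returns DIV_ROUND_UP(300, 64) == 5, i.e. sending
 * the frame consumes five controller buffer blocks.
 */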

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3124
Gustavo Padovan6039aa72012-05-23 04:04:18 -03003125static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003126{
3127 BT_DBG("%s", hdev->name);
3128
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003129 /* No ACL link over BR/EDR controller */
3130 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3131 return;
3132
3133 /* No AMP link over AMP controller */
3134 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003135 return;
3136
3137 switch (hdev->flow_ctl_mode) {
3138 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3139 hci_sched_acl_pkt(hdev);
3140 break;
3141
3142 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3143 hci_sched_acl_blk(hdev);
3144 break;
3145 }
3146}
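
/* Dispatch example: a BR/EDR controller reports
 * HCI_FLOW_CTL_MODE_PACKET_BASED and is throttled by hdev->acl_cnt free
 * packets, while an AMP controller reports HCI_FLOW_CTL_MODE_BLOCK_BASED and
 * is throttled by hdev->block_cnt buffer blocks (see __get_blocks() above).
 */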

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
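
/* Usage sketch for the request framework that this completion logic serves:
 * commands queued between hci_req_init() and hci_req_run() form one request,
 * bt_cb(skb)->req.start marks its first command, and the complete callback
 * is invoked from hci_req_cmd_complete() above once the last command in the
 * request finishes. A minimal sketch assuming this tree's hci_request API;
 * the helper name is hypothetical.
 */
static inline int example_run_request(struct hci_dev *hdev,
				      hci_req_complete_t complete)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	return hci_req_run(&req, complete);
}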

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
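
/* Driver-side sketch of how packets reach the RX path above: a transport
 * driver builds an skb, tags its packet type, stores hdev in skb->dev (the
 * convention used throughout this file) and calls hci_recv_frame(), which
 * queues it on hdev->rx_q and schedules rx_work. Assumes this tree's
 * hci_recv_frame(skb) entry point; the helper name is hypothetical.
 */
static inline int example_deliver_event(struct hci_dev *hdev,
					const void *data, int len)
{
	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), data, len);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	skb->dev = (void *) hdev;

	return hci_recv_frame(skb);
}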

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
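
/* Usage sketch tying the two helpers above together; inquiry length is in
 * units of 1.28 s, so 0x08 requests roughly 10.24 s of inquiry. The helper
 * name and retry policy are hypothetical.
 */
static inline int example_restart_discovery(struct hci_dev *hdev)
{
	int err = hci_do_inquiry(hdev, 0x08);

	if (err != -EINPROGRESS)
		return err;

	/* An inquiry is already running: cancel it so a fresh one can start */
	return hci_cancel_inquiry(hdev);
}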

u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}
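
/* Mapping example: the userspace-facing mgmt types BDADDR_LE_PUBLIC and
 * BDADDR_LE_RANDOM translate to the core's ADDR_LE_DEV_PUBLIC and
 * ADDR_LE_DEV_RANDOM respectively; anything unrecognised falls back to the
 * random address type via the default branch above.
 */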