/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

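/* Send a single HCI command and wait for its completion. If @event is
 * non-zero the wait is for that specific event; otherwise a Command
 * Complete event for @opcode is expected. Returns the matching event
 * skb on success or an ERR_PTR, so callers must check with IS_ERR().
 */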
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
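
/* Illustrative sketch (not part of this file): a driver's setup()
 * callback could use __hci_cmd_sync() to run one vendor command and
 * examine the Command Complete parameters. The 0xfc0f opcode and the
 * one-byte status layout below are hypothetical.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, 0xfc0f, 0, NULL, HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 *	BT_DBG("%s vendor status 0x%2.2x", hdev->name, skb->data[0]);
 *	kfree_skb(skb);
 */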

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
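
/* Illustrative sketch (not part of this file): callers pass a request
 * builder plus an opaque option word, e.g. the HCISETSCAN ioctl below
 * runs hci_scan_req through this helper:
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			   HCI_INIT_TIMEOUT);
 */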

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Block Size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);

	/* LE-only controllers have LE implicitly enabled */
	if (!lmp_bredr_capable(hdev))
		set_bit(HCI_LE_ENABLED, &hdev->dev_flags);
}

static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

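/* The inquiry mode selected above follows the core specification
 * values: 0x00 standard results, 0x01 results with RSSI, 0x02 extended
 * inquiry results; hci_get_inquiry_mode() additionally falls back to
 * 0x01 for a few controllers with known quirks.
 */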
static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre-1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* LE-only devices do not support explicit enablement */
	if (!lmp_bredr_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

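/* Controller bring-up runs in up to three synchronous stages: init1
 * resets the controller and performs the transport-specific basic
 * reads, init2 sets up the event mask and per-feature settings, and
 * init3 applies link policy, LE support and extended feature reads.
 */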
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
	 * dual-mode BR/EDR/LE controllers. AMP controllers only need
	 * the first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR.
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE))
			return -EINTR;
	}

	/* For an unlimited number of responses we use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

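/* Illustrative sketch (not part of this file): user space reaches
 * hci_inquiry() through the HCIINQUIRY ioctl on a raw HCI socket,
 * roughly as below; the 255-entry buffer mirrors the cap above and
 * { 0x33, 0x8b, 0x9e } is the general inquiry access code (GIAC).
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	ioctl(hci_sock, HCIINQUIRY, &buf);
 */
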
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
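
/* Each element written by create_ad() above follows the standard
 * advertising data format: a length octet covering type plus payload,
 * an AD type octet (EIR_FLAGS, EIR_TX_POWER, EIR_NAME_COMPLETE/SHORT)
 * and the payload itself, packed into at most HCI_MAX_AD_LENGTH bytes.
 */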

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
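
/* Illustrative sketch (not part of this file): user space brings a
 * controller up through the HCIDEVUP ioctl, which lands here:
 *
 *	int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	ioctl(sk, HCIDEVUP, 0);		// dev_id 0, i.e. hci0
 */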
1193
1194static int hci_dev_do_close(struct hci_dev *hdev)
1195{
1196 BT_DBG("%s %p", hdev->name, hdev);
1197
Andre Guedes28b75a82012-02-03 17:48:00 -03001198 cancel_work_sync(&hdev->le_scan);
1199
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001200 cancel_delayed_work(&hdev->power_off);
1201
Linus Torvalds1da177e2005-04-16 15:20:36 -07001202 hci_req_cancel(hdev, ENODEV);
1203 hci_req_lock(hdev);
1204
1205 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001206 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207 hci_req_unlock(hdev);
1208 return 0;
1209 }
1210
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001211 /* Flush RX and TX works */
1212 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001213 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001214
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001215 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001216 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001217 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001218 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001219 }
1220
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001221 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001222 cancel_delayed_work(&hdev->service_cache);
1223
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001224 cancel_delayed_work_sync(&hdev->le_scan_disable);
1225
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001226 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001227 inquiry_cache_flush(hdev);
1228 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001229 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230
1231 hci_notify(hdev, HCI_DEV_DOWN);
1232
1233 if (hdev->flush)
1234 hdev->flush(hdev);
1235
1236 /* Reset device */
1237 skb_queue_purge(&hdev->cmd_q);
1238 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001239 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001240 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001242 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001243 clear_bit(HCI_INIT, &hdev->flags);
1244 }
1245
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001246 /* flush cmd work */
1247 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248
1249 /* Drop queues */
1250 skb_queue_purge(&hdev->rx_q);
1251 skb_queue_purge(&hdev->cmd_q);
1252 skb_queue_purge(&hdev->raw_q);
1253
1254 /* Drop last sent command */
1255 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001256 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 kfree_skb(hdev->sent_cmd);
1258 hdev->sent_cmd = NULL;
1259 }
1260
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001261 kfree_skb(hdev->recv_evt);
1262 hdev->recv_evt = NULL;
1263
Linus Torvalds1da177e2005-04-16 15:20:36 -07001264 /* After this point our queues are empty
1265 * and no tasks are scheduled. */
1266 hdev->close(hdev);
1267
Johan Hedberg35b973c2013-03-15 17:06:59 -05001268 /* Clear flags */
1269 hdev->flags = 0;
1270 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1271
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001272 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1273 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001274 hci_dev_lock(hdev);
1275 mgmt_powered(hdev, 0);
1276 hci_dev_unlock(hdev);
1277 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001278
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001279 /* Controller radio is available but is currently powered down */
1280 hdev->amp_status = 0;
1281
Johan Hedberge59fda82012-02-22 18:11:53 +02001282 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001283 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001284
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 hci_req_unlock(hdev);
1286
1287 hci_dev_put(hdev);
1288 return 0;
1289}
1290
1291int hci_dev_close(__u16 dev)
1292{
1293 struct hci_dev *hdev;
1294 int err;
1295
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001296 hdev = hci_dev_get(dev);
1297 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001299
1300 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1301 cancel_delayed_work(&hdev->power_off);
1302
Linus Torvalds1da177e2005-04-16 15:20:36 -07001303 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001304
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 hci_dev_put(hdev);
1306 return err;
1307}
1308
1309int hci_dev_reset(__u16 dev)
1310{
1311 struct hci_dev *hdev;
1312 int ret = 0;
1313
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001314 hdev = hci_dev_get(dev);
1315 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316 return -ENODEV;
1317
1318 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319
1320 if (!test_bit(HCI_UP, &hdev->flags))
1321 goto done;
1322
1323 /* Drop queues */
1324 skb_queue_purge(&hdev->rx_q);
1325 skb_queue_purge(&hdev->cmd_q);
1326
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001327 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001328 inquiry_cache_flush(hdev);
1329 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001330 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331
1332 if (hdev->flush)
1333 hdev->flush(hdev);
1334
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001335 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001336 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001337
1338 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001339 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001340
1341done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001342 hci_req_unlock(hdev);
1343 hci_dev_put(hdev);
1344 return ret;
1345}
1346
1347int hci_dev_reset_stat(__u16 dev)
1348{
1349 struct hci_dev *hdev;
1350 int ret = 0;
1351
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001352 hdev = hci_dev_get(dev);
1353 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001354 return -ENODEV;
1355
1356 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1357
1358 hci_dev_put(hdev);
1359
1360 return ret;
1361}
1362
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

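/* Build the HCIGETDEVLIST response: one (dev_id, flags) pair per
 * registered controller, bounded by the count requested by user space.
 * Walking the list also clears the auto-off state of each device.
 */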
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

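/* Fill in the HCIGETDEVINFO response for a single controller. For
 * LE-only controllers the LE buffer settings are reported in the ACL
 * fields, since the structure has no dedicated LE members.
 */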
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

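/* Deferred power handling: hci_power_on runs from the request workqueue
 * when a controller is registered, and arms a delayed power_off so a
 * controller that was only brought up automatically is switched off
 * again after HCI_AUTO_OFF_TIMEOUT.
 */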
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

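/* Decide whether a newly created link key should be stored persistently.
 * Legacy keys are always kept, debug keys never are, and for Secure
 * Simple Pairing the decision depends on the bonding requirements that
 * both sides announced during pairing.
 */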
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side requested dedicated bonding */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side requested dedicated bonding */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1685
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001686struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001687{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001688 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001689
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001690 list_for_each_entry(k, &hdev->long_term_keys, list) {
1691 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001692 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001693 continue;
1694
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001695 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001696 }
1697
1698 return NULL;
1699}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001700
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001701struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001702 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001703{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001704 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001705
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001706 list_for_each_entry(k, &hdev->long_term_keys, list)
1707 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001708 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001709 return k;
1710
1711 return NULL;
1712}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001713
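/* Store (or update) a BR/EDR link key. Known controller quirks are
 * worked around, the management interface is notified of new keys, and
 * non-persistent keys are marked so the connection flushes them on
 * disconnect.
 */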
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

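/* Store (or update) an SMP long term key for an LE pairing. Only STKs
 * and LTKs are accepted, and mgmt is only told about actual long term
 * keys, not short term ones.
 */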
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

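/* Request builders for LE scanning: the first programs the scan
 * parameters, the second enables scanning with duplicate filtering.
 * Both queue their command on an asynchronous HCI request.
 */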
static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

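/* Start an LE scan synchronously: program the scan parameters, enable
 * scanning, and schedule delayed work to disable the scan again once
 * the requested timeout expires.
 */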
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   timeout);

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

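/* Reassemble a partially received HCI packet of the given type into
 * hdev->reassembly[index]. Data is copied until the packet header
 * announces the full payload length; a complete frame is handed to
 * hci_recv_frame(). Returns the number of input bytes not yet
 * consumed, or a negative error.
 */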
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

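/* Submit a built request: the commands queued on req->cmd_q are spliced
 * onto the device command queue and the completion callback is attached
 * to the last command. A minimal usage sketch (the callback name is
 * illustrative, not part of this file):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, my_complete_cb);
 */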
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
		 const void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	bt_cb(skb)->req.event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

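/* Queue an (optionally fragmented) ACL frame for a channel. The first
 * fragment keeps the ACL_START flag; any fragments hanging off
 * frag_list are re-flagged as ACL_CONT and queued atomically so the
 * scheduler never sees a partial frame.
 */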
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002804
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
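/* Pick the connection of the given link type that has data queued and
 * the fewest packets in flight, then grant it an equal share of the
 * controller's free buffer count as its quota (at least one packet).
 */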
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

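/* TX timeout handler: forcibly disconnect every connection of the given
 * type that still has unacknowledged packets outstanding.
 */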
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

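/* Channel-level scheduler: like hci_low_sent(), but walks the channels
 * of every connection of the given type and only considers the highest
 * skb priority currently queued. Among channels at that priority, the
 * one whose connection has the fewest packets in flight wins.
 */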
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

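/* After a TX round, promote the head skb of every channel that sent
 * nothing this round toward HCI_PRIO_MAX - 1, so lower priority traffic
 * is not starved indefinitely by higher priority channels.
 */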
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

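/* Number of controller data blocks consumed by one ACL packet under
 * block-based flow control (ACL header bytes excluded).
 */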
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

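/* If the controller has had no free ACL buffers for longer than
 * HCI_ACL_TX_TIMEOUT, assume the link has stalled and tear it down.
 */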
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

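/* Packet-based ACL scheduler: drain the selected channel until its
 * quota is used up or a lower priority skb surfaces, charging one
 * controller buffer per packet sent.
 */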
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

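/* Block-based ACL scheduler: same idea as hci_sched_acl_pkt(), but the
 * quota and the controller budget are counted in data blocks rather
 * than packets. AMP controllers schedule AMP_LINK traffic here.
 */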
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

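/* LE scheduler: mirrors the packet-based ACL scheduler, but draws on
 * the dedicated LE buffer pool when the controller has one (le_pkts),
 * falling back to the shared ACL pool otherwise.
 */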
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

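/* A request is complete once the head of the command queue starts a new
 * request (or the queue is empty).
 */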
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

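/* Requeue a clone of the last sent command, unless it was HCI_Reset.
 * Used to recover when a controller reports completion for a command
 * other than the one actually pending.
 */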
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

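/* Called on command complete/status events: decide whether the current
 * request has finished and, if so, locate and invoke its completion
 * callback, flushing the rest of the request's commands on failure.
 */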
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

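/* RX work: drain hdev->rx_q, mirroring each frame to the monitor (and,
 * in promiscuous mode, to HCI sockets) before handing it to the event,
 * ACL or SCO handler.
 */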
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

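/* Command work: if the controller has a free command slot, dequeue the
 * next command, keep a clone in hdev->sent_cmd for completion matching,
 * and arm the command timer (unless a reset is pending).
 */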
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

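/* Translate a BDADDR_LE_* address type to its HCI ADDR_LE_DEV_*
 * equivalent; anything but public falls back to the LE random type.
 */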
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}