/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

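/* The synchronous request machinery: a submitter sets hdev->req_status
 * to HCI_REQ_PEND and sleeps on req_wait_q; the completion callback
 * below (or hci_req_cancel()) stores the result in req_result, flips
 * req_status and wakes the sleeper.
 */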
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

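/* Take the last received event out of hdev->recv_evt and return it if
 * it matches the expected @event (or, when @event is 0, if it is the
 * Command Complete event for @opcode). On any mismatch the skb is
 * freed and ERR_PTR(-ENODATA) is returned.
 */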
struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
{
	struct hci_ev_cmd_complete *ev;
	struct hci_event_hdr *hdr;
	struct sk_buff *skb;

	hci_dev_lock(hdev);

	skb = hdev->recv_evt;
	hdev->recv_evt = NULL;

	hci_dev_unlock(hdev);

	if (!skb)
		return ERR_PTR(-ENODATA);

	if (skb->len < sizeof(*hdr)) {
		BT_ERR("Too short HCI event");
		goto failed;
	}

	hdr = (void *) skb->data;
	skb_pull(skb, HCI_EVENT_HDR_SIZE);

	if (event) {
		if (hdr->evt != event)
			goto failed;
		return skb;
	}

	if (hdr->evt != HCI_EV_CMD_COMPLETE) {
		BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
		goto failed;
	}

	if (skb->len < sizeof(*ev)) {
		BT_ERR("Too short cmd_complete event");
		goto failed;
	}

	ev = (void *) skb->data;
	skb_pull(skb, sizeof(*ev));

	if (opcode == __le16_to_cpu(ev->opcode))
		return skb;

	BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
	       __le16_to_cpu(ev->opcode));

failed:
	kfree_skb(skb);
	return ERR_PTR(-ENODATA);
}

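/* Send a single HCI command and block until @event arrives (or, when
 * @event is 0, until the matching Command Complete event arrives), a
 * signal becomes pending, or @timeout expires. Returns the event skb
 * on success and an ERR_PTR() otherwise. Like __hci_req_sync(), this
 * relies on the caller serializing requests, typically by holding the
 * lock taken with hci_req_lock().
 */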
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0)
		return ERR_PTR(err);

	return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

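/* Convenience wrapper for the common case of no special completion
 * event. For example, a setup routine could fetch the controller's
 * address with:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * and then read the return parameters from skb->data before releasing
 * the skb with kfree_skb().
 */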
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Block size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

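/* Stage one of the controller bring-up: reset (unless the quirk says
 * the controller is reset on close anyway) followed by the basic
 * identity reads appropriate for the transport type.
 */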
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}

static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

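/* Pick the best inquiry result format the controller supports: 0x02
 * (with extended inquiry result), 0x01 (with RSSI) or 0x00 (standard).
 * A few controllers that deliver RSSI-capable results without
 * advertising the LMP feature are matched by manufacturer/revision
 * below.
 */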
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield, so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 p;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev)) {
		hci_set_le_support(req);
		hci_update_ad(req);
	}

	/* Read features beyond page 1 if available */
	for (p = 2; p < HCI_MAX_PAGES && p <= hdev->max_page; p++) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = p;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}
}

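/* Run the init stages in order. Each stage is a synchronous request,
 * so a failing command aborts the bring-up with the corresponding
 * error.
 */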
static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

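/* Discovery is considered active while devices are being found or
 * their names are being resolved.
 */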
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

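/* Re-insert @ie into the resolve list, keeping the list ordered by
 * ascending |RSSI| so that the strongest devices get their names
 * resolved first. Entries whose resolution is already pending are
 * skipped and thus stay at the front.
 */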
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

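/* Add or refresh the cache entry for an inquiry result. Returns true
 * if the remote name is known (so no name resolution is needed) and
 * false otherwise, or on allocation failure.
 */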
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}

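/* HCIINQUIRY ioctl handler: run an inquiry if the cache is stale or a
 * flush was requested, wait for the inquiry to finish, and copy the
 * cached results back to user space.
 */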
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;

		/* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
		 * cleared). If it is interrupted by a signal, return -EINTR
		 * (dropping the device reference taken above).
		 */
		if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
				TASK_INTERRUPTIBLE)) {
			err = -EINTR;
			goto done;
		}
	}

	/* For an unlimited number of responses, use a buffer with
	 * 255 entries.
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

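/* Build the LE advertising data payload in @ptr (at most
 * HCI_MAX_AD_LENGTH bytes): the Flags field, the TX power if known,
 * and as much of the local name as still fits. Returns the number of
 * bytes used.
 */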
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!lmp_le_capable(hdev))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

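/* Bring the device up: call the driver's open(), run the vendor setup
 * hook and the staged HCI init, and on failure undo everything so the
 * device is left closed.
 */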
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	atomic_set(&hdev->cmd_cnt, 1);
	set_bit(HCI_INIT, &hdev->flags);

	if (hdev->setup && test_bit(HCI_SETUP, &hdev->dev_flags))
		ret = hdev->setup(hdev);

	if (!ret) {
		/* Treat all non-BR/EDR controllers as raw devices if
		 * enable_hs is not set.
		 */
		if (hdev->dev_type != HCI_BREDR && !enable_hs)
			set_bit(HCI_RAW, &hdev->flags);

		if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
			set_bit(HCI_RAW, &hdev->flags);

		if (!test_bit(HCI_RAW, &hdev->flags))
			ret = __hci_init(hdev);
	}

	clear_bit(HCI_INIT, &hdev->flags);

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

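/* The counterpart of hci_dev_open(): flush all pending work and
 * queues, optionally reset the controller, and drop the reference
 * taken when the device was brought up.
 */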
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

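/* Handler for the remaining HCI device ioctls. Note that for
 * HCISETACLMTU/HCISETSCOMTU the MTU and the packet count are packed
 * into the two 16-bit halves of the 32-bit dev_opt value.
 */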
1353int hci_dev_cmd(unsigned int cmd, void __user *arg)
1354{
1355 struct hci_dev *hdev;
1356 struct hci_dev_req dr;
1357 int err = 0;
1358
1359 if (copy_from_user(&dr, arg, sizeof(dr)))
1360 return -EFAULT;
1361
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001362 hdev = hci_dev_get(dr.dev_id);
1363 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 return -ENODEV;
1365
1366 switch (cmd) {
1367 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001368 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1369 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001370 break;
1371
1372 case HCISETENCRYPT:
1373 if (!lmp_encrypt_capable(hdev)) {
1374 err = -EOPNOTSUPP;
1375 break;
1376 }
1377
1378 if (!test_bit(HCI_AUTH, &hdev->flags)) {
1379 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +02001380 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
1381 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001382 if (err)
1383 break;
1384 }
1385
Johan Hedberg01178cd2013-03-05 20:37:41 +02001386 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
1387 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388 break;
1389
1390 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001391 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
1392 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001393 break;
1394
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001395 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +02001396 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
1397 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001398 break;
1399
1400 case HCISETLINKMODE:
1401 hdev->link_mode = ((__u16) dr.dev_opt) &
1402 (HCI_LM_MASTER | HCI_LM_ACCEPT);
1403 break;
1404
Linus Torvalds1da177e2005-04-16 15:20:36 -07001405 case HCISETPTYPE:
1406 hdev->pkt_type = (__u16) dr.dev_opt;
1407 break;
1408
Linus Torvalds1da177e2005-04-16 15:20:36 -07001409 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001410 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1411 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 break;
1413
1414 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001415 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1416 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417 break;
1418
1419 default:
1420 err = -EINVAL;
1421 break;
1422 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001423
Linus Torvalds1da177e2005-04-16 15:20:36 -07001424 hci_dev_put(hdev);
1425 return err;
1426}
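
/* Editorial note (not in the original source): HCISETACLMTU and
 * HCISETSCOMTU above pack two 16-bit values into the 32-bit dev_opt
 * word, with the MTU in the upper half and the packet count in the
 * lower half on a little-endian machine. A hypothetical caller would
 * build the argument roughly as:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;
 *	dr.dev_opt = (acl_mtu << 16) | acl_pkts;
 *	ioctl(hci_sock, HCISETACLMTU, &dr);
 *
 * where hci_sock is assumed to be an open AF_BLUETOOTH/BTPROTO_HCI raw
 * socket with the privileges this ioctl requires.
 */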
1427
1428int hci_get_dev_list(void __user *arg)
1429{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001430 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001431 struct hci_dev_list_req *dl;
1432 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001433 int n = 0, size, err;
1434 __u16 dev_num;
1435
1436 if (get_user(dev_num, (__u16 __user *) arg))
1437 return -EFAULT;
1438
1439 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1440 return -EINVAL;
1441
1442 size = sizeof(*dl) + dev_num * sizeof(*dr);
1443
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001444 dl = kzalloc(size, GFP_KERNEL);
1445 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446 return -ENOMEM;
1447
1448 dr = dl->dev_req;
1449
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001450 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001451 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001452 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001453 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001454
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001455 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1456 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001457
Linus Torvalds1da177e2005-04-16 15:20:36 -07001458 (dr + n)->dev_id = hdev->id;
1459 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001460
Linus Torvalds1da177e2005-04-16 15:20:36 -07001461 if (++n >= dev_num)
1462 break;
1463 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001464 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001465
1466 dl->dev_num = n;
1467 size = sizeof(*dl) + n * sizeof(*dr);
1468
1469 err = copy_to_user(arg, dl, size);
1470 kfree(dl);
1471
1472 return err ? -EFAULT : 0;
1473}
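
/* Illustrative user-space counterpart (editorial sketch, not part of
 * this file) to the HCIGETDEVLIST handler above; error handling is
 * omitted and HCI_MAX_DEV serves as an arbitrary upper bound:
 *
 *	struct hci_dev_list_req *dl;
 *	int i, sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
 *
 *	dl = calloc(1, sizeof(*dl) + HCI_MAX_DEV * sizeof(struct hci_dev_req));
 *	dl->dev_num = HCI_MAX_DEV;
 *	if (ioctl(sk, HCIGETDEVLIST, dl) == 0)
 *		for (i = 0; i < dl->dev_num; i++)
 *			printf("hci%u flags 0x%x\n", dl->dev_req[i].dev_id,
 *			       dl->dev_req[i].dev_opt);
 */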
1474
1475int hci_get_dev_info(void __user *arg)
1476{
1477 struct hci_dev *hdev;
1478 struct hci_dev_info di;
1479 int err = 0;
1480
1481 if (copy_from_user(&di, arg, sizeof(di)))
1482 return -EFAULT;
1483
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001484 hdev = hci_dev_get(di.dev_id);
1485 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001486 return -ENODEV;
1487
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001488 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001489 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001490
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001491 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1492 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001493
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 strcpy(di.name, hdev->name);
1495 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001496 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 di.flags = hdev->flags;
1498 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001499 if (lmp_bredr_capable(hdev)) {
1500 di.acl_mtu = hdev->acl_mtu;
1501 di.acl_pkts = hdev->acl_pkts;
1502 di.sco_mtu = hdev->sco_mtu;
1503 di.sco_pkts = hdev->sco_pkts;
1504 } else {
1505 di.acl_mtu = hdev->le_mtu;
1506 di.acl_pkts = hdev->le_pkts;
1507 di.sco_mtu = 0;
1508 di.sco_pkts = 0;
1509 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001510 di.link_policy = hdev->link_policy;
1511 di.link_mode = hdev->link_mode;
1512
1513 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1514 memcpy(&di.features, &hdev->features, sizeof(di.features));
1515
1516 if (copy_to_user(arg, &di, sizeof(di)))
1517 err = -EFAULT;
1518
1519 hci_dev_put(hdev);
1520
1521 return err;
1522}
1523
1524/* ---- Interface to HCI drivers ---- */
1525
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001526static int hci_rfkill_set_block(void *data, bool blocked)
1527{
1528 struct hci_dev *hdev = data;
1529
1530 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1531
1532 if (!blocked)
1533 return 0;
1534
1535 hci_dev_do_close(hdev);
1536
1537 return 0;
1538}
1539
1540static const struct rfkill_ops hci_rfkill_ops = {
1541 .set_block = hci_rfkill_set_block,
1542};
1543
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001544static void hci_power_on(struct work_struct *work)
1545{
1546 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1547
1548 BT_DBG("%s", hdev->name);
1549
1550 if (hci_dev_open(hdev->id) < 0)
1551 return;
1552
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001553 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg19202572013-01-14 22:33:51 +02001554 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1555 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001556
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001557 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001558 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001559}
1560
1561static void hci_power_off(struct work_struct *work)
1562{
Johan Hedberg32435532011-11-07 22:16:04 +02001563 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001564 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001565
1566 BT_DBG("%s", hdev->name);
1567
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001568 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001569}
1570
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001571static void hci_discov_off(struct work_struct *work)
1572{
1573 struct hci_dev *hdev;
1574 u8 scan = SCAN_PAGE;
1575
1576 hdev = container_of(work, struct hci_dev, discov_off.work);
1577
1578 BT_DBG("%s", hdev->name);
1579
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001580 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001581
1582 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1583
1584 hdev->discov_timeout = 0;
1585
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001586 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001587}
1588
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001589int hci_uuids_clear(struct hci_dev *hdev)
1590{
Johan Hedberg48210022013-01-27 00:31:28 +02001591 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001592
Johan Hedberg48210022013-01-27 00:31:28 +02001593 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1594 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001595 kfree(uuid);
1596 }
1597
1598 return 0;
1599}
1600
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001601int hci_link_keys_clear(struct hci_dev *hdev)
1602{
1603 struct list_head *p, *n;
1604
1605 list_for_each_safe(p, n, &hdev->link_keys) {
1606 struct link_key *key;
1607
1608 key = list_entry(p, struct link_key, list);
1609
1610 list_del(p);
1611 kfree(key);
1612 }
1613
1614 return 0;
1615}
1616
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001617int hci_smp_ltks_clear(struct hci_dev *hdev)
1618{
1619 struct smp_ltk *k, *tmp;
1620
1621 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1622 list_del(&k->list);
1623 kfree(k);
1624 }
1625
1626 return 0;
1627}
1628
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001629struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1630{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001631 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001632
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001633 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001634 if (bacmp(bdaddr, &k->bdaddr) == 0)
1635 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001636
1637 return NULL;
1638}
1639
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301640static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001641 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001642{
1643 /* Legacy key */
1644 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301645 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001646
1647 /* Debug keys are insecure so don't store them persistently */
1648 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301649 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001650
1651 /* Changed combination key and there's no previous one */
1652 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301653 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001654
1655 /* Security mode 3 case */
1656 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301657 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001658
1659 /* Neither local nor remote side had no-bonding as requirement */
1660 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301661 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001662
1663 /* Local side had dedicated bonding as requirement */
1664 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301665 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001666
1667 /* Remote side had dedicated bonding as requirement */
1668 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301669 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001670
1671 /* If none of the above criteria match, then don't store the key
1672 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301673 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001674}
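
/* Editorial summary of the checks above: legacy keys are always stored,
 * debug keys never are, and a changed combination key with no known
 * previous key is treated as temporary. Without a connection (the
 * Security Mode 3 case) the key is kept. Otherwise the key is
 * persistent when both sides requested some form of bonding, or when
 * either side requested dedicated bonding.
 */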
1675
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001676struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001677{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001678 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001679
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001680 list_for_each_entry(k, &hdev->long_term_keys, list) {
1681 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001682 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001683 continue;
1684
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001685 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001686 }
1687
1688 return NULL;
1689}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001690
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001691struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001692 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001693{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001694 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001695
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001696 list_for_each_entry(k, &hdev->long_term_keys, list)
1697 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001698 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001699 return k;
1700
1701 return NULL;
1702}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001703
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001704int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001705 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001706{
1707 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301708 u8 old_key_type;
1709 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001710
1711 old_key = hci_find_link_key(hdev, bdaddr);
1712 if (old_key) {
1713 old_key_type = old_key->type;
1714 key = old_key;
1715 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001716 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001717 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1718 if (!key)
1719 return -ENOMEM;
1720 list_add(&key->list, &hdev->link_keys);
1721 }
1722
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001723 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001724
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001725 /* Some buggy controller combinations generate a changed
1726 * combination key for legacy pairing even when there's no
1727 * previous key */
1728 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001729 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001730 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001731 if (conn)
1732 conn->key_type = type;
1733 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001734
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001735 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001736 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001737 key->pin_len = pin_len;
1738
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001739 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001740 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001741 else
1742 key->type = type;
1743
Johan Hedberg4df378a2011-04-28 11:29:03 -07001744 if (!new_key)
1745 return 0;
1746
1747 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1748
Johan Hedberg744cf192011-11-08 20:40:14 +02001749 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001750
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301751 if (conn)
1752 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001753
1754 return 0;
1755}
1756
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001757int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001758 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001759 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001760{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001761 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001762
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001763 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1764 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001765
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001766 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1767 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001768 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001769 else {
1770 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001771 if (!key)
1772 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001773 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001774 }
1775
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001776 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001777 key->bdaddr_type = addr_type;
1778 memcpy(key->val, tk, sizeof(key->val));
1779 key->authenticated = authenticated;
1780 key->ediv = ediv;
1781 key->enc_size = enc_size;
1782 key->type = type;
1783 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001784
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001785 if (!new_key)
1786 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001787
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001788 if (type & HCI_SMP_LTK)
1789 mgmt_new_ltk(hdev, key, 1);
1790
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001791 return 0;
1792}
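
/* Editorial note: hci_add_ltk() stores both STKs and LTKs in the same
 * list, but only long-term keys (HCI_SMP_LTK) are announced to user
 * space via mgmt_new_ltk() above; short-term keys are not meant to be
 * stored persistently.
 */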
1793
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001794int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1795{
1796 struct link_key *key;
1797
1798 key = hci_find_link_key(hdev, bdaddr);
1799 if (!key)
1800 return -ENOENT;
1801
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001802 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001803
1804 list_del(&key->list);
1805 kfree(key);
1806
1807 return 0;
1808}
1809
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001810int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1811{
1812 struct smp_ltk *k, *tmp;
1813
1814 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1815 if (bacmp(bdaddr, &k->bdaddr))
1816 continue;
1817
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001818 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001819
1820 list_del(&k->list);
1821 kfree(k);
1822 }
1823
1824 return 0;
1825}
1826
Ville Tervo6bd32322011-02-16 16:32:41 +02001827/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001828static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001829{
1830 struct hci_dev *hdev = (void *) arg;
1831
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001832 if (hdev->sent_cmd) {
1833 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1834 u16 opcode = __le16_to_cpu(sent->opcode);
1835
1836 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1837 } else {
1838 BT_ERR("%s command tx timeout", hdev->name);
1839 }
1840
Ville Tervo6bd32322011-02-16 16:32:41 +02001841 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001842 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001843}
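
/* Editorial note: the timer above is (re)armed whenever a command is
 * handed to the driver. Firing means no Command Complete/Status event
 * arrived in time; forcing cmd_cnt back to 1 and requeueing cmd_work
 * unwedges the command queue so later commands can still go out.
 */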
1844
Szymon Janc2763eda2011-03-22 13:12:22 +01001845struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001846 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001847{
1848 struct oob_data *data;
1849
1850 list_for_each_entry(data, &hdev->remote_oob_data, list)
1851 if (bacmp(bdaddr, &data->bdaddr) == 0)
1852 return data;
1853
1854 return NULL;
1855}
1856
1857int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1858{
1859 struct oob_data *data;
1860
1861 data = hci_find_remote_oob_data(hdev, bdaddr);
1862 if (!data)
1863 return -ENOENT;
1864
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001865 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001866
1867 list_del(&data->list);
1868 kfree(data);
1869
1870 return 0;
1871}
1872
1873int hci_remote_oob_data_clear(struct hci_dev *hdev)
1874{
1875 struct oob_data *data, *n;
1876
1877 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1878 list_del(&data->list);
1879 kfree(data);
1880 }
1881
1882 return 0;
1883}
1884
1885int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001886 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001887{
1888 struct oob_data *data;
1889
1890 data = hci_find_remote_oob_data(hdev, bdaddr);
1891
1892 if (!data) {
1893 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1894 if (!data)
1895 return -ENOMEM;
1896
1897 bacpy(&data->bdaddr, bdaddr);
1898 list_add(&data->list, &hdev->remote_oob_data);
1899 }
1900
1901 memcpy(data->hash, hash, sizeof(data->hash));
1902 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1903
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001904 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001905
1906 return 0;
1907}
1908
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001909struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001910{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001911 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001912
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001913 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001914 if (bacmp(bdaddr, &b->bdaddr) == 0)
1915 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001916
1917 return NULL;
1918}
1919
1920int hci_blacklist_clear(struct hci_dev *hdev)
1921{
1922 struct list_head *p, *n;
1923
1924 list_for_each_safe(p, n, &hdev->blacklist) {
1925 struct bdaddr_list *b;
1926
1927 b = list_entry(p, struct bdaddr_list, list);
1928
1929 list_del(p);
1930 kfree(b);
1931 }
1932
1933 return 0;
1934}
1935
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001936int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001937{
1938 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001939
1940 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1941 return -EBADF;
1942
Antti Julku5e762442011-08-25 16:48:02 +03001943 if (hci_blacklist_lookup(hdev, bdaddr))
1944 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001945
1946 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001947 if (!entry)
1948 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001949
1950 bacpy(&entry->bdaddr, bdaddr);
1951
1952 list_add(&entry->list, &hdev->blacklist);
1953
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001954 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001955}
1956
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001957int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001958{
1959 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001960
Szymon Janc1ec918c2011-11-16 09:32:21 +01001961 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001962 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001963
1964 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001965 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001966 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001967
1968 list_del(&entry->list);
1969 kfree(entry);
1970
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001971 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001972}
1973
Johan Hedberg42c6b122013-03-05 20:37:49 +02001974static void le_scan_param_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001975{
1976 struct le_scan_params *param = (struct le_scan_params *) opt;
1977 struct hci_cp_le_set_scan_param cp;
1978
1979 memset(&cp, 0, sizeof(cp));
1980 cp.type = param->type;
1981 cp.interval = cpu_to_le16(param->interval);
1982 cp.window = cpu_to_le16(param->window);
1983
Johan Hedberg42c6b122013-03-05 20:37:49 +02001984 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001985}
1986
Johan Hedberg42c6b122013-03-05 20:37:49 +02001987static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001988{
1989 struct hci_cp_le_set_scan_enable cp;
1990
1991 memset(&cp, 0, sizeof(cp));
Andre Guedes76a388b2013-04-04 20:21:02 -03001992 cp.enable = LE_SCAN_ENABLE;
Andre Guedes525e2962013-04-04 20:21:01 -03001993 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001994
Johan Hedberg42c6b122013-03-05 20:37:49 +02001995 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001996}
1997
1998static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001999 u16 window, int timeout)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002000{
2001 long timeo = msecs_to_jiffies(3000);
2002 struct le_scan_params param;
2003 int err;
2004
2005 BT_DBG("%s", hdev->name);
2006
2007 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2008 return -EINPROGRESS;
2009
2010 param.type = type;
2011 param.interval = interval;
2012 param.window = window;
2013
2014 hci_req_lock(hdev);
2015
Johan Hedberg01178cd2013-03-05 20:37:41 +02002016 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
2017 timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002018 if (!err)
Johan Hedberg01178cd2013-03-05 20:37:41 +02002019 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002020
2021 hci_req_unlock(hdev);
2022
2023 if (err < 0)
2024 return err;
2025
Johan Hedberg46818ed2013-01-14 22:33:52 +02002026 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
Andre Guedesb6c75152013-04-04 20:20:59 -03002027 timeout);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002028
2029 return 0;
2030}
2031
Andre Guedes7dbfac12012-03-15 16:52:07 -03002032int hci_cancel_le_scan(struct hci_dev *hdev)
2033{
2034 BT_DBG("%s", hdev->name);
2035
2036 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
2037 return -EALREADY;
2038
2039 if (cancel_delayed_work(&hdev->le_scan_disable)) {
2040 struct hci_cp_le_set_scan_enable cp;
2041
2042 /* Send HCI command to disable LE Scan */
2043 memset(&cp, 0, sizeof(cp));
2044 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2045 }
2046
2047 return 0;
2048}
2049
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002050static void le_scan_disable_work(struct work_struct *work)
2051{
2052 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002053 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03002054 struct hci_cp_le_set_scan_enable cp;
2055
2056 BT_DBG("%s", hdev->name);
2057
2058 memset(&cp, 0, sizeof(cp));
2059
2060 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2061}
2062
Andre Guedes28b75a82012-02-03 17:48:00 -03002063static void le_scan_work(struct work_struct *work)
2064{
2065 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
2066 struct le_scan_params *param = &hdev->le_scan_params;
2067
2068 BT_DBG("%s", hdev->name);
2069
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002070 hci_do_le_scan(hdev, param->type, param->interval, param->window,
2071 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03002072}
2073
2074int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002075 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03002076{
2077 struct le_scan_params *param = &hdev->le_scan_params;
2078
2079 BT_DBG("%s", hdev->name);
2080
Johan Hedbergf15504782012-10-24 21:12:03 +03002081 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
2082 return -ENOTSUPP;
2083
Andre Guedes28b75a82012-02-03 17:48:00 -03002084 if (work_busy(&hdev->le_scan))
2085 return -EINPROGRESS;
2086
2087 param->type = type;
2088 param->interval = interval;
2089 param->window = window;
2090 param->timeout = timeout;
2091
2092 queue_work(system_long_wq, &hdev->le_scan);
2093
2094 return 0;
2095}
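
/* Illustrative call (editorial sketch) of hci_le_scan() with made-up
 * interval/window values; type 0x01 requests active scanning. Note that
 * the timeout is passed straight through to queue_delayed_work() and is
 * therefore in jiffies, not milliseconds:
 *
 *	err = hci_le_scan(hdev, 0x01, 0x0012, 0x0012,
 *			  msecs_to_jiffies(10240));
 */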
2096
David Herrmann9be0dab2012-04-22 14:39:57 +02002097/* Alloc HCI device */
2098struct hci_dev *hci_alloc_dev(void)
2099{
2100 struct hci_dev *hdev;
2101
2102 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
2103 if (!hdev)
2104 return NULL;
2105
David Herrmannb1b813d2012-04-22 14:39:58 +02002106 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
2107 hdev->esco_type = (ESCO_HV1);
2108 hdev->link_mode = (HCI_LM_ACCEPT);
2109 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01002110 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
2111 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02002112
David Herrmannb1b813d2012-04-22 14:39:58 +02002113 hdev->sniff_max_interval = 800;
2114 hdev->sniff_min_interval = 80;
2115
2116 mutex_init(&hdev->lock);
2117 mutex_init(&hdev->req_lock);
2118
2119 INIT_LIST_HEAD(&hdev->mgmt_pending);
2120 INIT_LIST_HEAD(&hdev->blacklist);
2121 INIT_LIST_HEAD(&hdev->uuids);
2122 INIT_LIST_HEAD(&hdev->link_keys);
2123 INIT_LIST_HEAD(&hdev->long_term_keys);
2124 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03002125 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002126
2127 INIT_WORK(&hdev->rx_work, hci_rx_work);
2128 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2129 INIT_WORK(&hdev->tx_work, hci_tx_work);
2130 INIT_WORK(&hdev->power_on, hci_power_on);
2131 INIT_WORK(&hdev->le_scan, le_scan_work);
2132
David Herrmannb1b813d2012-04-22 14:39:58 +02002133 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2134 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2135 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2136
David Herrmannb1b813d2012-04-22 14:39:58 +02002137 skb_queue_head_init(&hdev->rx_q);
2138 skb_queue_head_init(&hdev->cmd_q);
2139 skb_queue_head_init(&hdev->raw_q);
2140
2141 init_waitqueue_head(&hdev->req_wait_q);
2142
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002143 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002144
David Herrmannb1b813d2012-04-22 14:39:58 +02002145 hci_init_sysfs(hdev);
2146 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002147
2148 return hdev;
2149}
2150EXPORT_SYMBOL(hci_alloc_dev);
2151
2152/* Free HCI device */
2153void hci_free_dev(struct hci_dev *hdev)
2154{
David Herrmann9be0dab2012-04-22 14:39:57 +02002155 /* will free via device release */
2156 put_device(&hdev->dev);
2157}
2158EXPORT_SYMBOL(hci_free_dev);
2159
Linus Torvalds1da177e2005-04-16 15:20:36 -07002160/* Register HCI device */
2161int hci_register_dev(struct hci_dev *hdev)
2162{
David Herrmannb1b813d2012-04-22 14:39:58 +02002163 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164
David Herrmann010666a2012-01-07 15:47:07 +01002165 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002166 return -EINVAL;
2167
Mat Martineau08add512011-11-02 16:18:36 -07002168 /* Do not allow HCI_AMP devices to register at index 0,
2169 * so the index can be used as the AMP controller ID.
2170 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002171 switch (hdev->dev_type) {
2172 case HCI_BREDR:
2173 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2174 break;
2175 case HCI_AMP:
2176 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2177 break;
2178 default:
2179 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002181
Sasha Levin3df92b32012-05-27 22:36:56 +02002182 if (id < 0)
2183 return id;
2184
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185 sprintf(hdev->name, "hci%d", id);
2186 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002187
2188 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2189
Sasha Levin3df92b32012-05-27 22:36:56 +02002190 write_lock(&hci_dev_list_lock);
2191 list_add(&hdev->list, &hci_dev_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002192 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02002194 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002195 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02002196 if (!hdev->workqueue) {
2197 error = -ENOMEM;
2198 goto err;
2199 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002200
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002201 hdev->req_workqueue = alloc_workqueue(hdev->name,
2202 WQ_HIGHPRI | WQ_UNBOUND |
2203 WQ_MEM_RECLAIM, 1);
2204 if (!hdev->req_workqueue) {
2205 destroy_workqueue(hdev->workqueue);
2206 error = -ENOMEM;
2207 goto err;
2208 }
2209
David Herrmann33ca9542011-10-08 14:58:49 +02002210 error = hci_add_sysfs(hdev);
2211 if (error < 0)
2212 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002214 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002215 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2216 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002217 if (hdev->rfkill) {
2218 if (rfkill_register(hdev->rfkill) < 0) {
2219 rfkill_destroy(hdev->rfkill);
2220 hdev->rfkill = NULL;
2221 }
2222 }
2223
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002224 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002225
2226 if (hdev->dev_type != HCI_AMP)
2227 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2228
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002230 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231
Johan Hedberg19202572013-01-14 22:33:51 +02002232 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002233
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002235
David Herrmann33ca9542011-10-08 14:58:49 +02002236err_wqueue:
2237 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002238 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002239err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002240 ida_simple_remove(&hci_index_ida, hdev->id);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002241 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002242 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002243 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002244
David Herrmann33ca9542011-10-08 14:58:49 +02002245 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246}
2247EXPORT_SYMBOL(hci_register_dev);
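
/* Illustrative sketch (editorial, modeled loosely on existing HCI
 * transport drivers; the foo_* callbacks are hypothetical): the
 * expected alloc/register sequence in a driver probe path.
 *
 *	struct hci_dev *hdev;
 *	int err;
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = foo_open;
 *	hdev->close = foo_close;
 *	hdev->flush = foo_flush;
 *	hdev->send  = foo_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */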
2248
2249/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002250void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251{
Sasha Levin3df92b32012-05-27 22:36:56 +02002252 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002253
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002254 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002255
Johan Hovold94324962012-03-15 14:48:41 +01002256 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2257
Sasha Levin3df92b32012-05-27 22:36:56 +02002258 id = hdev->id;
2259
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002260 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002262 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263
2264 hci_dev_do_close(hdev);
2265
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302266 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002267 kfree_skb(hdev->reassembly[i]);
2268
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002269 cancel_work_sync(&hdev->power_on);
2270
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002271 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002272 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002273 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002274 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002275 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002276 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002277
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002278 /* mgmt_index_removed should take care of emptying the
2279 * pending list */
2280 BUG_ON(!list_empty(&hdev->mgmt_pending));
2281
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 hci_notify(hdev, HCI_DEV_UNREG);
2283
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002284 if (hdev->rfkill) {
2285 rfkill_unregister(hdev->rfkill);
2286 rfkill_destroy(hdev->rfkill);
2287 }
2288
David Herrmannce242972011-10-08 14:58:48 +02002289 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002290
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002291 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002292 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002293
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002294 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002295 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002296 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002297 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002298 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002299 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002300 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002301
David Herrmanndc946bd2012-01-07 15:47:24 +01002302 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002303
2304 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305}
2306EXPORT_SYMBOL(hci_unregister_dev);
2307
2308/* Suspend HCI device */
2309int hci_suspend_dev(struct hci_dev *hdev)
2310{
2311 hci_notify(hdev, HCI_DEV_SUSPEND);
2312 return 0;
2313}
2314EXPORT_SYMBOL(hci_suspend_dev);
2315
2316/* Resume HCI device */
2317int hci_resume_dev(struct hci_dev *hdev)
2318{
2319 hci_notify(hdev, HCI_DEV_RESUME);
2320 return 0;
2321}
2322EXPORT_SYMBOL(hci_resume_dev);
2323
Marcel Holtmann76bca882009-11-18 00:40:39 +01002324/* Receive frame from HCI drivers */
2325int hci_recv_frame(struct sk_buff *skb)
2326{
2327 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2328 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002329 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002330 kfree_skb(skb);
2331 return -ENXIO;
2332 }
2333
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002334 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002335 bt_cb(skb)->incoming = 1;
2336
2337 /* Time stamp */
2338 __net_timestamp(skb);
2339
Marcel Holtmann76bca882009-11-18 00:40:39 +01002340 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002341 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002342
Marcel Holtmann76bca882009-11-18 00:40:39 +01002343 return 0;
2344}
2345EXPORT_SYMBOL(hci_recv_frame);
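
/* Illustrative sketch (editorial): how a transport driver of this era
 * delivers a complete frame. Both the packet type and skb->dev must be
 * filled in before the call, as hci_recv_frame() recovers hdev from
 * skb->dev:
 *
 *	skb = bt_skb_alloc(count, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *	memcpy(skb_put(skb, count), data, count);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 */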
2346
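/* Editorial description: hci_reassembly() below rebuilds one HCI packet
 * from arbitrarily sized chunks. It first accumulates the fixed-size
 * header for the given packet type, then grows scb->expect from the
 * length field found there; once expect reaches zero the finished skb
 * is handed to hci_recv_frame(). The return value is the number of
 * input bytes not yet consumed, or a negative error.
 */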
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302347static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002348 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302349{
2350 int len = 0;
2351 int hlen = 0;
2352 int remain = count;
2353 struct sk_buff *skb;
2354 struct bt_skb_cb *scb;
2355
2356 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002357 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302358 return -EILSEQ;
2359
2360 skb = hdev->reassembly[index];
2361
2362 if (!skb) {
2363 switch (type) {
2364 case HCI_ACLDATA_PKT:
2365 len = HCI_MAX_FRAME_SIZE;
2366 hlen = HCI_ACL_HDR_SIZE;
2367 break;
2368 case HCI_EVENT_PKT:
2369 len = HCI_MAX_EVENT_SIZE;
2370 hlen = HCI_EVENT_HDR_SIZE;
2371 break;
2372 case HCI_SCODATA_PKT:
2373 len = HCI_MAX_SCO_SIZE;
2374 hlen = HCI_SCO_HDR_SIZE;
2375 break;
2376 }
2377
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002378 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302379 if (!skb)
2380 return -ENOMEM;
2381
2382 scb = (void *) skb->cb;
2383 scb->expect = hlen;
2384 scb->pkt_type = type;
2385
2386 skb->dev = (void *) hdev;
2387 hdev->reassembly[index] = skb;
2388 }
2389
2390 while (count) {
2391 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002392 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302393
2394 memcpy(skb_put(skb, len), data, len);
2395
2396 count -= len;
2397 data += len;
2398 scb->expect -= len;
2399 remain = count;
2400
2401 switch (type) {
2402 case HCI_EVENT_PKT:
2403 if (skb->len == HCI_EVENT_HDR_SIZE) {
2404 struct hci_event_hdr *h = hci_event_hdr(skb);
2405 scb->expect = h->plen;
2406
2407 if (skb_tailroom(skb) < scb->expect) {
2408 kfree_skb(skb);
2409 hdev->reassembly[index] = NULL;
2410 return -ENOMEM;
2411 }
2412 }
2413 break;
2414
2415 case HCI_ACLDATA_PKT:
2416 if (skb->len == HCI_ACL_HDR_SIZE) {
2417 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2418 scb->expect = __le16_to_cpu(h->dlen);
2419
2420 if (skb_tailroom(skb) < scb->expect) {
2421 kfree_skb(skb);
2422 hdev->reassembly[index] = NULL;
2423 return -ENOMEM;
2424 }
2425 }
2426 break;
2427
2428 case HCI_SCODATA_PKT:
2429 if (skb->len == HCI_SCO_HDR_SIZE) {
2430 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2431 scb->expect = h->dlen;
2432
2433 if (skb_tailroom(skb) < scb->expect) {
2434 kfree_skb(skb);
2435 hdev->reassembly[index] = NULL;
2436 return -ENOMEM;
2437 }
2438 }
2439 break;
2440 }
2441
2442 if (scb->expect == 0) {
2443 /* Complete frame */
2444
2445 bt_cb(skb)->pkt_type = type;
2446 hci_recv_frame(skb);
2447
2448 hdev->reassembly[index] = NULL;
2449 return remain;
2450 }
2451 }
2452
2453 return remain;
2454}
2455
Marcel Holtmannef222012007-07-11 06:42:04 +02002456int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2457{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302458 int rem = 0;
2459
Marcel Holtmannef222012007-07-11 06:42:04 +02002460 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2461 return -EILSEQ;
2462
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002463 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002464 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302465 if (rem < 0)
2466 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002467
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302468 data += (count - rem);
2469 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002470 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002471
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302472 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002473}
2474EXPORT_SYMBOL(hci_recv_fragment);
2475
Suraj Sumangala99811512010-07-14 13:02:19 +05302476#define STREAM_REASSEMBLY 0
2477
2478int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2479{
2480 int type;
2481 int rem = 0;
2482
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002483 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302484 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2485
2486 if (!skb) {
2487 struct { char type; } *pkt;
2488
2489 /* Start of the frame */
2490 pkt = data;
2491 type = pkt->type;
2492
2493 data++;
2494 count--;
2495 } else
2496 type = bt_cb(skb)->pkt_type;
2497
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002498 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002499 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302500 if (rem < 0)
2501 return rem;
2502
2503 data += (count - rem);
2504 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002505 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302506
2507 return rem;
2508}
2509EXPORT_SYMBOL(hci_recv_stream_fragment);
2510
Linus Torvalds1da177e2005-04-16 15:20:36 -07002511/* ---- Interface to upper protocols ---- */
2512
Linus Torvalds1da177e2005-04-16 15:20:36 -07002513int hci_register_cb(struct hci_cb *cb)
2514{
2515 BT_DBG("%p name %s", cb, cb->name);
2516
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002517 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002518 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002519 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002520
2521 return 0;
2522}
2523EXPORT_SYMBOL(hci_register_cb);
2524
2525int hci_unregister_cb(struct hci_cb *cb)
2526{
2527 BT_DBG("%p name %s", cb, cb->name);
2528
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002529 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002531 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532
2533 return 0;
2534}
2535EXPORT_SYMBOL(hci_unregister_cb);
2536
2537static int hci_send_frame(struct sk_buff *skb)
2538{
2539 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2540
2541 if (!hdev) {
2542 kfree_skb(skb);
2543 return -ENODEV;
2544 }
2545
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002546 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002547
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002548 /* Time stamp */
2549 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002551 /* Send copy to monitor */
2552 hci_send_to_monitor(hdev, skb);
2553
2554 if (atomic_read(&hdev->promisc)) {
2555 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002556 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557 }
2558
2559 /* Get rid of skb owner, prior to sending to the driver. */
2560 skb_orphan(skb);
2561
2562 return hdev->send(skb);
2563}
2564
Johan Hedberg3119ae92013-03-05 20:37:44 +02002565void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2566{
2567 skb_queue_head_init(&req->cmd_q);
2568 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002569 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002570}
2571
2572int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2573{
2574 struct hci_dev *hdev = req->hdev;
2575 struct sk_buff *skb;
2576 unsigned long flags;
2577
2578 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2579
Andre Guedes5d73e032013-03-08 11:20:16 -03002580	/* If an error occurred during request building, remove all HCI
2581 * commands queued on the HCI request queue.
2582 */
2583 if (req->err) {
2584 skb_queue_purge(&req->cmd_q);
2585 return req->err;
2586 }
2587
Johan Hedberg3119ae92013-03-05 20:37:44 +02002588 /* Do not allow empty requests */
2589 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002590 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002591
2592 skb = skb_peek_tail(&req->cmd_q);
2593 bt_cb(skb)->req.complete = complete;
2594
2595 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2596 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2597 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2598
2599 queue_work(hdev->workqueue, &hdev->cmd_work);
2600
2601 return 0;
2602}
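
/* Illustrative usage (editorial sketch) of the request API above, with
 * a hypothetical completion callback foo_complete:
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *	err = hci_req_run(&req, foo_complete);
 *
 * foo_complete() then runs once the controller has answered the last
 * queued command.
 */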
2603
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002604static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002605 u32 plen, const void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002606{
2607 int len = HCI_COMMAND_HDR_SIZE + plen;
2608 struct hci_command_hdr *hdr;
2609 struct sk_buff *skb;
2610
Linus Torvalds1da177e2005-04-16 15:20:36 -07002611 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002612 if (!skb)
2613 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002614
2615 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002616 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002617 hdr->plen = plen;
2618
2619 if (plen)
2620 memcpy(skb_put(skb, plen), param, plen);
2621
2622 BT_DBG("skb len %d", skb->len);
2623
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002624 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002625 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002626
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002627 return skb;
2628}
2629
2630/* Send HCI command */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002631int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen,
2632 const void *param)
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002633{
2634 struct sk_buff *skb;
2635
2636 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2637
2638 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2639 if (!skb) {
2640 BT_ERR("%s no memory for command", hdev->name);
2641 return -ENOMEM;
2642 }
2643
Johan Hedberg11714b32013-03-05 20:37:47 +02002644	/* Stand-alone HCI commands must be flagged as
2645 * single-command requests.
2646 */
2647 bt_cb(skb)->req.start = true;
2648
Linus Torvalds1da177e2005-04-16 15:20:36 -07002649 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002650 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002651
2652 return 0;
2653}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654
Johan Hedberg71c76a12013-03-05 20:37:46 +02002655/* Queue a command to an asynchronous HCI request */
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002656void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
2657 const void *param, u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002658{
2659 struct hci_dev *hdev = req->hdev;
2660 struct sk_buff *skb;
2661
2662 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2663
Andre Guedes34739c12013-03-08 11:20:18 -03002664	/* If an error occurred during request building, there is no point in
2665 * queueing the HCI command. We can simply return.
2666 */
2667 if (req->err)
2668 return;
2669
Johan Hedberg71c76a12013-03-05 20:37:46 +02002670 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2671 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002672 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2673 hdev->name, opcode);
2674 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002675 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002676 }
2677
2678 if (skb_queue_empty(&req->cmd_q))
2679 bt_cb(skb)->req.start = true;
2680
Johan Hedberg02350a72013-04-03 21:50:29 +03002681 bt_cb(skb)->req.event = event;
2682
Johan Hedberg71c76a12013-03-05 20:37:46 +02002683 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002684}
2685
Johan Hedberg07dc93d2013-04-19 10:14:51 +03002686void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
2687 const void *param)
Johan Hedberg02350a72013-04-03 21:50:29 +03002688{
2689 hci_req_add_ev(req, opcode, plen, param, 0);
2690}
2691
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002693void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694{
2695 struct hci_command_hdr *hdr;
2696
2697 if (!hdev->sent_cmd)
2698 return NULL;
2699
2700 hdr = (void *) hdev->sent_cmd->data;
2701
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002702 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703 return NULL;
2704
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002705 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706
2707 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2708}
2709
2710/* Send ACL data */
2711static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2712{
2713 struct hci_acl_hdr *hdr;
2714 int len = skb->len;
2715
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002716 skb_push(skb, HCI_ACL_HDR_SIZE);
2717 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002718 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002719 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2720 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721}
2722
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002723static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002724 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002726 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 struct hci_dev *hdev = conn->hdev;
2728 struct sk_buff *list;
2729
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002730 skb->len = skb_headlen(skb);
2731 skb->data_len = 0;
2732
2733 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002734
2735 switch (hdev->dev_type) {
2736 case HCI_BREDR:
2737 hci_add_acl_hdr(skb, conn->handle, flags);
2738 break;
2739 case HCI_AMP:
2740 hci_add_acl_hdr(skb, chan->handle, flags);
2741 break;
2742 default:
2743 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2744 return;
2745 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002746
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002747 list = skb_shinfo(skb)->frag_list;
2748 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749		/* Non-fragmented */
2750 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2751
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002752 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 } else {
2754 /* Fragmented */
2755 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2756
2757 skb_shinfo(skb)->frag_list = NULL;
2758
2759 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002760 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002762 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002763
2764 flags &= ~ACL_START;
2765 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766 do {
2767 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002768
Linus Torvalds1da177e2005-04-16 15:20:36 -07002769 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002770 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002771 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002772
2773 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2774
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002775 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 } while (list);
2777
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002778 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002780}
2781
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

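/* SCO frames are never fragmented: the 3-byte SCO header (connection
 * handle plus data length) is prepended and the frame is queued for the
 * TX work. */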
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
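/* Among connections of the requested type that are in BT_CONNECTED or
 * BT_CONFIG state and have queued data, pick the one with the fewest
 * packets in flight and grant it an equal share of the free controller
 * buffers; e.g. 9 free ACL buffers across 3 busy connections gives a
 * quote of 3. The quote is never less than 1. */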
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

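/* The controller stopped completing packets: treat every connection of
 * this type with unacknowledged data as stalled and disconnect it. */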
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

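/* Channel-level counterpart of hci_low_sent(): only channels whose head
 * skb carries the highest pending priority compete, and among those the
 * channel on the least-busy connection wins the quote. */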
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

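/* Anti-starvation pass run after a scheduling round: a channel that
 * sent nothing gets the head of its queue promoted to HCI_PRIO_MAX - 1
 * so it can win the next round, while channels that did send simply
 * have their counters reset. */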
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

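/* Worked example: with block_len = 64, a queued ACL frame of 304 bytes
 * (4-byte ACL header plus 300 bytes of payload) costs
 * DIV_ROUND_UP(300, 64) = 5 data blocks. */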
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

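/* Declare an ACL link TX timeout when the controller has reported no
 * free buffers for longer than HCI_ACL_TX_TIMEOUT since the last
 * transmission. */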
static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

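/* Packet-based ACL scheduling: each transmitted frame costs exactly one
 * acl_cnt buffer credit. */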
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

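/* Block-based ACL scheduling: credits are data blocks, so one frame may
 * consume several block_cnt credits; on an AMP controller the
 * schedulable link type is AMP_LINK rather than ACL_LINK. */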
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

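/* eSCO scheduling mirrors SCO and draws on the same sco_cnt pool. */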
static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

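/* LE scheduling: a controller without a dedicated LE buffer pool
 * (le_pkts == 0) borrows ACL buffers, which is why acl_cnt may be
 * written back here. */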
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
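/* Inbound ACL frames are matched to a connection by handle and handed
 * to L2CAP; frames for unknown handles are logged and dropped. */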
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

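/* Commands in cmd_q are grouped into requests; a request is complete
 * once the queue is empty or the next queued command is flagged as the
 * start of a new request. */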
static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}

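/* Requeue a clone of the last transmitted command so it gets sent
 * again; HCI_OP_RESET is deliberately never resent. */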
static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

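/* Called from the event handlers: work out whether the request this
 * opcode belongs to has finished, find its completion callback in
 * either sent_cmd or the queued commands, and invoke it. */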
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}

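/* RX work: every dequeued frame is copied to the monitor and, in
 * promiscuous mode, to the HCI sockets before being dispatched by
 * packet type; everything is dropped in raw mode, and data packets are
 * dropped during init. */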
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

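/* CMD work: when a command credit is available, send the next queued
 * command, keep a clone in sent_cmd for completion matching, and arm
 * the command timeout (a pending reset stops the timer instead). */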
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}

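/* Start a General Inquiry using the GIAC (0x9e8b33, stored
 * little-endian); length is in units of 1.28 seconds. Returns
 * -EINPROGRESS if an inquiry is already running. */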
int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

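/* Abort a running inquiry; returns -EALREADY when none is active. */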
int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}

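/* Convert a BDADDR_LE_* address type to the corresponding
 * ADDR_LE_DEV_* constant, falling back to the random type. */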
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}