/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

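/* Pull the last received event out of hdev->recv_evt and check that it
 * matches what the caller waited for: either the explicitly requested
 * event, or a Command Complete for the given opcode. On any mismatch
 * the skb is freed and ERR_PTR(-ENODATA) is returned.
 */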
struct sk_buff *hci_get_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 event)
{
        struct hci_ev_cmd_complete *ev;
        struct hci_event_hdr *hdr;
        struct sk_buff *skb;

        hci_dev_lock(hdev);

        skb = hdev->recv_evt;
        hdev->recv_evt = NULL;

        hci_dev_unlock(hdev);

        if (!skb)
                return ERR_PTR(-ENODATA);

        if (skb->len < sizeof(*hdr)) {
                BT_ERR("Too short HCI event");
                goto failed;
        }

        hdr = (void *) skb->data;
        skb_pull(skb, HCI_EVENT_HDR_SIZE);

        if (event) {
                if (hdr->evt != event)
                        goto failed;
                return skb;
        }

        if (hdr->evt != HCI_EV_CMD_COMPLETE) {
                BT_DBG("Last event is not cmd complete (0x%2.2x)", hdr->evt);
                goto failed;
        }

        if (skb->len < sizeof(*ev)) {
                BT_ERR("Too short cmd_complete event");
                goto failed;
        }

        ev = (void *) skb->data;
        skb_pull(skb, sizeof(*ev));

        if (opcode == __le16_to_cpu(ev->opcode))
                return skb;

        BT_DBG("opcode doesn't match (0x%2.2x != 0x%2.2x)", opcode,
               __le16_to_cpu(ev->opcode));

failed:
        kfree_skb(skb);
        return ERR_PTR(-ENODATA);
}

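/* Send a single HCI command and block until the controller answers with
 * the given event (or with Command Complete when event is 0). Sleeps, so
 * it must be called from process context; the caller gets the response
 * skb or an ERR_PTR.
 */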
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0)
                return ERR_PTR(err);

        return hci_get_cmd_complete(hdev, opcode, event);
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

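/* Illustrative sketch (not part of this file): a driver could use
 * __hci_cmd_sync() during setup to issue a single vendor command and
 * inspect the Command Complete parameters left in skb->data. The 0xfc01
 * opcode and one-byte payload below are hypothetical.
 *
 *      static int example_vendor_setup(struct hci_dev *hdev)
 *      {
 *              u8 param = 0x01;
 *              struct sk_buff *skb;
 *
 *              skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(param), &param,
 *                                   HCI_INIT_TIMEOUT);
 *              if (IS_ERR(skb))
 *                      return PTR_ERR(skb);
 *
 *              kfree_skb(skb);
 *              return 0;
 *      }
 */
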
/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
                          void (*func)(struct hci_request *req,
                                       unsigned long opt),
                          unsigned long opt, __u32 timeout)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        func(&req, opt);

        err = hci_req_run(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static int hci_req_sync(struct hci_dev *hdev,
                        void (*req)(struct hci_request *req,
                                    unsigned long opt),
                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

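/* Request builders: each of the functions below fills a struct hci_request
 * with one or more commands and is run through hci_req_sync() or
 * __hci_req_sync() above. hci_reset_req is shared by the init, ioctl
 * reset and close paths.
 */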
static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
        BT_DBG("%s %ld", req->hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &req->hdev->flags);
        hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Read Local Supported Features */
        hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read BD Address */
        hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
        req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Read Local Version */
        hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Local AMP Info */
        hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

        /* Read Data Blk size */
        hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_request init_req;
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        hci_req_init(&init_req, hdev);

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                if (skb_queue_empty(&init_req.cmd_q))
                        bt_cb(skb)->req.start = true;

                skb_queue_tail(&init_req.cmd_q, skb);
        }
        skb_queue_purge(&hdev->driver_init);

        hci_req_run(&init_req, NULL);

        /* Reset */
        if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
                hci_reset_req(req, 0);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(req);
                break;

        case HCI_AMP:
                amp_init(req);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void bredr_setup(struct hci_request *req)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read Class of Device */
        hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = __constant_cpu_to_le16(0x7d00);
        hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 0x01;
        hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

        /* Read page scan parameters */
        if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
                hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
        }
}

static void le_setup(struct hci_request *req)
{
        /* Read LE Buffer Size */
        hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

        /* Read LE Local Supported Features */
        hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

        /* Read LE Advertising Channel TX Power */
        hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

        /* Read LE White List Size */
        hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

        /* Read LE Supported States */
        hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

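/* Pick the best inquiry mode the controller supports: 0x02 = inquiry with
 * extended inquiry result, 0x01 = inquiry with RSSI, 0x00 = standard. The
 * manufacturer/revision checks below appear to whitelist controllers that
 * deliver RSSI results without advertising the LMP feature bit.
 */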
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
        if (lmp_ext_inq_capable(hdev))
                return 0x02;

        if (lmp_inq_rssi_capable(hdev))
                return 0x01;

        if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
            hdev->lmp_subver == 0x0757)
                return 0x01;

        if (hdev->manufacturer == 15) {
                if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
                        return 0x01;
                if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
                        return 0x01;
        }

        if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
            hdev->lmp_subver == 0x1805)
                return 0x01;

        return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
        u8 mode;

        mode = hci_get_inquiry_mode(req->hdev);

        hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

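/* Build the 8-byte HCI event mask bit by bit: start from the basic events
 * every controller must deliver and enable the optional ones only when
 * the corresponding LMP feature is present.
 */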
static void hci_setup_event_mask(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        /* The second byte is 0xff instead of 0x9f (two reserved bits
         * disabled) since a Broadcom 1.2 dongle doesn't respond to the
         * command otherwise.
         */
        u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

        /* CSR 1.1 dongles do not accept any bitfield, so don't try to set
         * any event mask for pre-1.2 devices.
         */
        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (lmp_bredr_capable(hdev)) {
                events[4] |= 0x01; /* Flow Specification Complete */
                events[4] |= 0x02; /* Inquiry Result with RSSI */
                events[4] |= 0x04; /* Read Remote Extended Features Complete */
                events[5] |= 0x08; /* Synchronous Connection Complete */
                events[5] |= 0x10; /* Synchronous Connection Changed */
        }

        if (lmp_inq_rssi_capable(hdev))
                events[4] |= 0x02; /* Inquiry Result with RSSI */

        if (lmp_sniffsubr_capable(hdev))
                events[5] |= 0x20; /* Sniff Subrating */

        if (lmp_pause_enc_capable(hdev))
                events[5] |= 0x80; /* Encryption Key Refresh Complete */

        if (lmp_ext_inq_capable(hdev))
                events[5] |= 0x40; /* Extended Inquiry Result */

        if (lmp_no_flush_capable(hdev))
                events[7] |= 0x01; /* Enhanced Flush Complete */

        if (lmp_lsto_capable(hdev))
                events[6] |= 0x80; /* Link Supervision Timeout Changed */

        if (lmp_ssp_capable(hdev)) {
                events[6] |= 0x01; /* IO Capability Request */
                events[6] |= 0x02; /* IO Capability Response */
                events[6] |= 0x04; /* User Confirmation Request */
                events[6] |= 0x08; /* User Passkey Request */
                events[6] |= 0x10; /* Remote OOB Data Request */
                events[6] |= 0x20; /* Simple Pairing Complete */
                events[7] |= 0x04; /* User Passkey Notification */
                events[7] |= 0x08; /* Keypress Notification */
                events[7] |= 0x10; /* Remote Host Supported
                                    * Features Notification
                                    */
        }

        if (lmp_le_capable(hdev))
                events[7] |= 0x20; /* LE Meta-Event */

        hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

        if (lmp_le_capable(hdev)) {
                memset(events, 0, sizeof(events));
                events[0] = 0x1f;
                hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
                            sizeof(events), events);
        }
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (lmp_bredr_capable(hdev))
                bredr_setup(req);

        if (lmp_le_capable(hdev))
                le_setup(req);

        hci_setup_event_mask(req);

        if (hdev->hci_ver > BLUETOOTH_VER_1_1)
                hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

        if (lmp_ssp_capable(hdev)) {
                if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
                        u8 mode = 0x01;
                        hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
                                    sizeof(mode), &mode);
                } else {
                        struct hci_cp_write_eir cp;

                        memset(hdev->eir, 0, sizeof(hdev->eir));
                        memset(&cp, 0, sizeof(cp));

                        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
                }
        }

        if (lmp_inq_rssi_capable(hdev))
                hci_setup_inquiry_mode(req);

        if (lmp_inq_tx_pwr_capable(hdev))
                hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

        if (lmp_ext_feat_capable(hdev)) {
                struct hci_cp_read_local_ext_features cp;

                cp.page = 0x01;
                hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
                            sizeof(cp), &cp);
        }

        if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
                u8 enable = 1;
                hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
                            &enable);
        }
}

static void hci_setup_link_policy(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_def_link_policy cp;
        u16 link_policy = 0;

        if (lmp_rswitch_capable(hdev))
                link_policy |= HCI_LP_RSWITCH;
        if (lmp_hold_capable(hdev))
                link_policy |= HCI_LP_HOLD;
        if (lmp_sniff_capable(hdev))
                link_policy |= HCI_LP_SNIFF;
        if (lmp_park_capable(hdev))
                link_policy |= HCI_LP_PARK;

        cp.policy = cpu_to_le16(link_policy);
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_le_host_supported cp;

        memset(&cp, 0, sizeof(cp));

        if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
                cp.le = 0x01;
                cp.simul = lmp_le_br_capable(hdev);
        }

        if (cp.le != lmp_host_le_capable(hdev))
                hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
                            &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;

        if (hdev->commands[5] & 0x10)
                hci_setup_link_policy(req);

        if (lmp_le_capable(hdev)) {
                hci_set_le_support(req);
                hci_update_ad(req);
        }
}

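/* Three-stage controller bring-up: stage 1 queues driver-specific
 * commands, resets the controller and reads the basic capabilities;
 * stages 2 and 3 configure BR/EDR and LE operation and only run for
 * HCI_BREDR controllers.
 */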
static int __hci_init(struct hci_dev *hdev)
{
        int err;

        err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        /* HCI_BREDR covers single-mode LE, single-mode BR/EDR and
         * dual-mode BR/EDR/LE controllers. AMP controllers only need
         * the first stage init.
         */
        if (hdev->dev_type != HCI_BREDR)
                return 0;

        err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
        if (err < 0)
                return err;

        return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", req->hdev->name, scan);

        /* Inquiry and Page scans */
        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", req->hdev->name, auth);

        /* Authentication */
        hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", req->hdev->name, encrypt);

        /* Encryption */
        hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", req->hdev->name, policy);

        /* Default link policy */
        hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
                                               bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %pMR", cache, bdaddr);

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                    abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

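/* Add or refresh an inquiry-cache entry for the device described by data.
 * Returns true when the remote name is already known (so the result can be
 * reported as-is) and false otherwise, including when a new entry cannot
 * be allocated.
 */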
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

        hci_remove_remote_oob_data(hdev, &data->bdaddr);

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                    data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
            ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

static int wait_inquiry(void *word)
{
        schedule();
        return signal_pending(current);
}

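/* HCIINQUIRY ioctl handler: run an inquiry if the cache is stale (or a
 * flush was requested), wait for it to finish, then copy the cached
 * results back to userspace.
 */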
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
            inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
                                   timeo);
                if (err < 0)
                        goto done;

                /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
                 * cleared). If it is interrupted by a signal, return -EINTR.
                 */
                if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
                                TASK_INTERRUPTIBLE))
                        return -EINTR;
        }

        /* For an unlimited number of responses we will use a buffer with
         * 255 entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                 ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

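/* Assemble the LE advertising data in ptr as a sequence of AD structures
 * (one length byte, one type byte, then payload): a flags field, the TX
 * power when valid, and the local name, shortened if it doesn't fit in
 * the remaining HCI_MAX_AD_LENGTH bytes. Returns the total length used.
 */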
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
        u8 ad_len = 0, flags = 0;
        size_t name_len;

        if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
                flags |= LE_AD_GENERAL;

        if (!lmp_bredr_capable(hdev))
                flags |= LE_AD_NO_BREDR;

        if (lmp_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_CTRL;

        if (lmp_host_le_br_capable(hdev))
                flags |= LE_AD_SIM_LE_BREDR_HOST;

        if (flags) {
                BT_DBG("adv flags 0x%02x", flags);

                ptr[0] = 2;
                ptr[1] = EIR_FLAGS;
                ptr[2] = flags;

                ad_len += 3;
                ptr += 3;
        }

        if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->adv_tx_power;

                ad_len += 3;
                ptr += 3;
        }

        name_len = strlen(hdev->dev_name);
        if (name_len > 0) {
                size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

                if (name_len > max_len) {
                        name_len = max_len;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ad_len += (name_len + 2);
                ptr += (name_len + 2);
        }

        return ad_len;
}

void hci_update_ad(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_data cp;
        u8 len;

        if (!lmp_le_capable(hdev))
                return;

        memset(&cp, 0, sizeof(cp));

        len = create_ad(hdev, cp.data);

        if (hdev->adv_data_len == len &&
            memcmp(cp.data, hdev->adv_data, len) == 0)
                return;

        memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
        hdev->adv_data_len = len;

        cp.length = len;

        hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

/* ---- HCI ioctl helpers ---- */

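/* Power on a controller: check rfkill, call the driver's open(), run the
 * staged init sequence unless the device is raw, and either announce the
 * device as up or unwind everything on failure.
 */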
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices if
         * enable_hs is not set.
         */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                ret = __hci_init(hdev);
                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
                    mgmt_valid_hdev(hdev)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

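/* Power off a controller: cancel pending work, flush queues and the
 * connection hash, optionally reset the hardware, and leave the device
 * in a state from which hci_dev_open() can bring it back up.
 */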
1187static int hci_dev_do_close(struct hci_dev *hdev)
1188{
1189 BT_DBG("%s %p", hdev->name, hdev);
1190
Andre Guedes28b75a82012-02-03 17:48:00 -03001191 cancel_work_sync(&hdev->le_scan);
1192
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -03001193 cancel_delayed_work(&hdev->power_off);
1194
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195 hci_req_cancel(hdev, ENODEV);
1196 hci_req_lock(hdev);
1197
1198 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001199 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 hci_req_unlock(hdev);
1201 return 0;
1202 }
1203
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02001204 /* Flush RX and TX works */
1205 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001206 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001207
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001208 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +02001209 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001210 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +02001211 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001212 }
1213
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001214 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +02001215 cancel_delayed_work(&hdev->service_cache);
1216
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001217 cancel_delayed_work_sync(&hdev->le_scan_disable);
1218
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001219 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 inquiry_cache_flush(hdev);
1221 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001222 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001223
1224 hci_notify(hdev, HCI_DEV_DOWN);
1225
1226 if (hdev->flush)
1227 hdev->flush(hdev);
1228
1229 /* Reset device */
1230 skb_queue_purge(&hdev->cmd_q);
1231 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +02001232 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +02001233 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001234 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +02001235 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001236 clear_bit(HCI_INIT, &hdev->flags);
1237 }
1238
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001239 /* flush cmd work */
1240 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001241
1242 /* Drop queues */
1243 skb_queue_purge(&hdev->rx_q);
1244 skb_queue_purge(&hdev->cmd_q);
1245 skb_queue_purge(&hdev->raw_q);
1246
1247 /* Drop last sent command */
1248 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -03001249 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 kfree_skb(hdev->sent_cmd);
1251 hdev->sent_cmd = NULL;
1252 }
1253
Johan Hedbergb6ddb632013-04-02 13:34:31 +03001254 kfree_skb(hdev->recv_evt);
1255 hdev->recv_evt = NULL;
1256
Linus Torvalds1da177e2005-04-16 15:20:36 -07001257 /* After this point our queues are empty
1258 * and no tasks are scheduled. */
1259 hdev->close(hdev);
1260
Johan Hedberg35b973c2013-03-15 17:06:59 -05001261 /* Clear flags */
1262 hdev->flags = 0;
1263 hdev->dev_flags &= ~HCI_PERSISTENT_MASK;
1264
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +03001265 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
1266 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001267 hci_dev_lock(hdev);
1268 mgmt_powered(hdev, 0);
1269 hci_dev_unlock(hdev);
1270 }
Johan Hedberg5add6af2010-12-16 10:00:37 +02001271
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +02001272 /* Controller radio is available but is currently powered down */
1273 hdev->amp_status = 0;
1274
Johan Hedberge59fda82012-02-22 18:11:53 +02001275 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +02001276 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +02001277
Linus Torvalds1da177e2005-04-16 15:20:36 -07001278 hci_req_unlock(hdev);
1279
1280 hci_dev_put(hdev);
1281 return 0;
1282}
1283
1284int hci_dev_close(__u16 dev)
1285{
1286 struct hci_dev *hdev;
1287 int err;
1288
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001289 hdev = hci_dev_get(dev);
1290 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001292
1293 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1294 cancel_delayed_work(&hdev->power_off);
1295
Linus Torvalds1da177e2005-04-16 15:20:36 -07001296 err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
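
/* HCISETACLMTU and HCISETSCOMTU pack two 16-bit values into the 32-bit
 * dr.dev_opt field: the pointer arithmetic above reads the packet count
 * from the first halfword and the MTU from the second, which on a
 * little-endian host means "pkts in the low word, MTU in the high word".
 * A hypothetical user-space caller (sketch only, not part of this file;
 * the hci_sock descriptor and the values are assumptions):
 */
#if 0 /* example only */
	struct hci_dev_req dr;

	dr.dev_id  = 0;				/* hci0 */
	dr.dev_opt = (1021 << 16) | 8;		/* MTU 1021, 8 packets (LE host) */

	if (ioctl(hci_sock, HCISETACLMTU, (unsigned long) &dr) < 0)
		perror("HCISETACLMTU");
#endif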

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither the local nor the remote side requested no-bonding */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
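
/* A few concrete cases of the decision above (illustrative, derived
 * directly from the checks in hci_persistent_key()):
 *
 *   key_type 0x00 (combination), any conn        -> true  (legacy key)
 *   key_type 0x03 (debug combination)            -> false (never stored)
 *   key_type 0x06 (changed combination),
 *     old_key_type == 0xff                       -> false (no previous key)
 *   key_type 0x04, conn->auth_type == 0x00 and
 *     conn->remote_auth == 0x00                  -> false (both sides
 *                                                   no-bonding, no
 *                                                   dedicated bonding)
 *   key_type 0x04, conn->auth_type == 0x02       -> true  (local dedicated
 *                                                   bonding)
 */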

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
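
/* Sketch of how an event handler might feed a key into the store above;
 * this is a simplified variant of the real Link Key Notification path in
 * hci_event.c (the connection hold/drop refcounting of the real handler
 * is omitted for brevity):
 */
#if 0 /* example only */
static void example_link_key_notify(struct hci_dev *hdev,
				    struct hci_ev_link_key_notify *ev)
{
	struct hci_conn *conn;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &ev->bdaddr);
	hci_add_link_key(hdev, conn, 1, &ev->bdaddr, ev->link_key,
			 ev->key_type, conn ? conn->pin_length : 0);

	hci_dev_unlock(hdev);
}
#endif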

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
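
/* The timer itself is armed when a command is handed to the driver and
 * disarmed when the matching event comes back.  A minimal sketch of the
 * pattern (the real call sites live in hci_cmd_work() and the event
 * handlers; HCI_CMD_TIMEOUT is assumed to be the timeout constant from
 * hci.h):
 */
#if 0 /* example only */
static void example_arm_cmd_timer(struct hci_dev *hdev)
{
	/* after handing hdev->sent_cmd to the driver */
	mod_timer(&hdev->cmd_timer, jiffies + HCI_CMD_TIMEOUT);
}

static void example_disarm_cmd_timer(struct hci_dev *hdev)
{
	/* once the matching Command Complete/Status event arrives */
	del_timer(&hdev->cmd_timer);
}
#endif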

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
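
/* Sketch of the intended use from the management interface; a simplified
 * variant of the Add Remote OOB Data handler in mgmt.c (the cp layout is
 * assumed to match struct mgmt_cp_add_remote_oob_data of this era):
 */
#if 0 /* example only */
static int example_store_oob(struct hci_dev *hdev,
			     struct mgmt_cp_add_remote_oob_data *cp)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_add_remote_oob_data(hdev, &cp->addr.bdaddr, cp->hash,
				      cp->randomizer);
	hci_dev_unlock(hdev);

	return err;
}
#endif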

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
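
/* A minimal usage sketch (hypothetical caller, mirroring the mgmt Block
 * Device handler): add a peer to the blacklist under the device lock.
 * Note that passing BDADDR_ANY to hci_blacklist_del() clears the whole
 * list, as implemented above.
 */
#if 0 /* example only */
static int example_block_device(struct hci_dev *hdev, bdaddr_t *bdaddr,
				u8 addr_type)
{
	int err;

	hci_dev_lock(hdev);
	err = hci_blacklist_add(hdev, bdaddr, addr_type);
	hci_dev_unlock(hdev);

	return err;
}
#endif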

static void le_scan_param_req(struct hci_request *req, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
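
/* Usage sketch with hypothetical values: start an active scan with a
 * 10 ms interval and window (interval and window are in units of
 * 0.625 ms, so 0x0010 * 0.625 ms = 10 ms) that auto-disables after
 * 10 seconds.  The work item then runs hci_do_le_scan() from
 * system_long_wq.
 */
#if 0 /* example only */
static int example_start_le_scan(struct hci_dev *hdev)
{
	return hci_le_scan(hdev, 0x01 /* active */, 0x0010, 0x0010, 10000);
}
#endif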

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
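
/* Sketch of the driver side of this API (hypothetical transport driver;
 * the example_open/close/send callbacks and the probe context are
 * assumptions): allocate the device, fill in the mandatory callbacks and
 * the bus type, then register.
 */
#if 0 /* example only */
static int example_probe(void)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_alloc_dev();
	if (!hdev)
		return -ENOMEM;

	hdev->bus   = HCI_USB;
	hdev->open  = example_open;	/* required, checked above */
	hdev->close = example_close;	/* required, checked above */
	hdev->send  = example_send;

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		return err;
	}

	return 0;
}
#endif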

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
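
/* Driver-side sketch (hypothetical): wrap a received HCI event in an
 * skb, tag its packet type and owning device, and hand it to the core.
 * In this era the hci_dev pointer travels in skb->dev, which is what
 * hci_recv_frame() above unpacks.
 */
#if 0 /* example only */
static int example_rx_event(struct hci_dev *hdev, const void *buf, int len)
{
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, len), buf, len);
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(skb);
}
#endif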

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
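
/* Sketch of a byte-stream transport (hypothetical UART-style driver)
 * feeding raw bytes to the core: the stream variant peels the leading
 * packet-type byte off itself, so the driver simply forwards whatever
 * arrived on the wire.
 */
#if 0 /* example only */
static void example_uart_rx(struct hci_dev *hdev, void *data, int count)
{
	if (hci_recv_stream_fragment(hdev, data, count) < 0)
		BT_ERR("%s corrupted stream", hdev->name);
}
#endif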

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
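
/* A minimal request sketch (hypothetical caller): batch two commands and
 * run them with a single completion callback, mirroring how the
 * __hci_req_sync machinery and the mgmt code drive this API.
 */
#if 0 /* example only */
static void example_complete(struct hci_dev *hdev, u8 status)
{
	BT_DBG("%s status 0x%2.2x", hdev->name, status);
}

static int example_run_request(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 scan = SCAN_PAGE;

	hci_req_init(&req, hdev);
	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	return hci_req_run(&req, example_complete);
}
#endif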

static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
				       u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	return skb;
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Stand-alone HCI commands must be flagged as
	 * single-command requests.
	 */
	bt_cb(skb)->req.start = true;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
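
/* Usage sketch (hypothetical values): send a single stand-alone command,
 * here an Inquiry with the General Inquiry Access Code and an
 * 8 * 1.28 s inquiry length:
 */
#if 0 /* example only */
static int example_start_inquiry(struct hci_dev *hdev)
{
	struct hci_cp_inquiry cp;
	u8 lap[3] = { 0x33, 0x8b, 0x9e };	/* GIAC */

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = 8;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
#endif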
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659
Johan Hedberg71c76a12013-03-05 20:37:46 +02002660/* Queue a command to an asynchronous HCI request */
Johan Hedberg02350a72013-04-03 21:50:29 +03002661void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen, void *param,
2662 u8 event)
Johan Hedberg71c76a12013-03-05 20:37:46 +02002663{
2664 struct hci_dev *hdev = req->hdev;
2665 struct sk_buff *skb;
2666
2667 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2668
Andre Guedes34739c12013-03-08 11:20:18 -03002669 /* If an error occured during request building, there is no point in
2670 * queueing the HCI command. We can simply return.
2671 */
2672 if (req->err)
2673 return;
2674
Johan Hedberg71c76a12013-03-05 20:37:46 +02002675 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2676 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002677 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2678 hdev->name, opcode);
2679 req->err = -ENOMEM;
Andre Guedese348fe62013-03-08 11:20:17 -03002680 return;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002681 }
2682
2683 if (skb_queue_empty(&req->cmd_q))
2684 bt_cb(skb)->req.start = true;
2685
Johan Hedberg02350a72013-04-03 21:50:29 +03002686 bt_cb(skb)->req.event = event;
2687
Johan Hedberg71c76a12013-03-05 20:37:46 +02002688 skb_queue_tail(&req->cmd_q, skb);
Johan Hedberg71c76a12013-03-05 20:37:46 +02002689}
2690
Johan Hedberg02350a72013-04-03 21:50:29 +03002691void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2692{
2693 hci_req_add_ev(req, opcode, plen, param, 0);
2694}
2695
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002697void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002698{
2699 struct hci_command_hdr *hdr;
2700
2701 if (!hdev->sent_cmd)
2702 return NULL;
2703
2704 hdr = (void *) hdev->sent_cmd->data;
2705
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002706 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 return NULL;
2708
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002709 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
2711 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2712}
2713
2714/* Send ACL data */
2715static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2716{
2717 struct hci_acl_hdr *hdr;
2718 int len = skb->len;
2719
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002720 skb_push(skb, HCI_ACL_HDR_SIZE);
2721 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002722 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002723 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2724 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002725}
2726
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002727static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002728 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002729{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002730 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 struct hci_dev *hdev = conn->hdev;
2732 struct sk_buff *list;
2733
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002734 skb->len = skb_headlen(skb);
2735 skb->data_len = 0;
2736
2737 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002738
2739 switch (hdev->dev_type) {
2740 case HCI_BREDR:
2741 hci_add_acl_hdr(skb, conn->handle, flags);
2742 break;
2743 case HCI_AMP:
2744 hci_add_acl_hdr(skb, chan->handle, flags);
2745 break;
2746 default:
2747 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2748 return;
2749 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002750
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002751 list = skb_shinfo(skb)->frag_list;
2752 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 /* Non fragmented */
2754 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2755
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002756 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 } else {
2758 /* Fragmented */
2759 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2760
2761 skb_shinfo(skb)->frag_list = NULL;
2762
2763 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002764 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002766 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002767
2768 flags &= ~ACL_START;
2769 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 do {
2771 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002772
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002774 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002775 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776
2777 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2778
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002779 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780 } while (list);
2781
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002782 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002784}
2785
2786void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2787{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002788 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002789
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002790 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002791
2792 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002793
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002794 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002796 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock the device here. Connections are always
	 * added and removed with the TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
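
/* Worked example (hypothetical counts): with three ACL connections holding
 * queued data that have sent 4, 2 and 7 frames respectively, the scan
 * above selects the one with sent == 2.  If hdev->acl_cnt is 8, the quote
 * becomes 8 / 3 == 2, so the least-served connection may transmit at most
 * two frames this round.
 */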

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
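
/* Example (hypothetical priorities): if channel A's head skb carries
 * priority 5 and channel B's carries priority 7, seeing B resets num and
 * min above, so only channels at the highest pending priority (7) compete
 * on their connection's sent count.  Lower-priority traffic waits until
 * hci_prio_recalculate() promotes it.
 */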

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
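
/* Starvation control: a channel that sent nothing in the last round has
 * its head skb bumped to HCI_PRIO_MAX - 1, so long-waiting low-priority
 * channels eventually win the priority comparison in hci_chan_sent();
 * channels that did send merely have their counter reset.
 */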

static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
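
/* Worked example (hypothetical numbers): with hdev->block_len == 64, a
 * 200-byte skb (196 bytes of payload after the 4-byte ACL header) needs
 * DIV_ROUND_UP(196, 64) == 4 blocks.
 */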

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
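
/* Inner-loop note: the priority snapshot is taken from the head of the
 * queue before draining, so a quote is only spent on a run of skbs at or
 * above the head's priority.  Hypothetically, with quote == 3 and queued
 * priorities 6, 6, 5, only the two priority-6 frames go out this round.
 */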

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Check the block budget before dequeueing so an
			 * oversized packet stays queued instead of being
			 * lost when we bail out. */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3145
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003146static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003147{
3148 BT_DBG("%s", hdev->name);
3149
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003150 /* No ACL link over BR/EDR controller */
3151 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3152 return;
3153
3154 /* No AMP link over AMP controller */
3155 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003156 return;
3157
3158 switch (hdev->flow_ctl_mode) {
3159 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3160 hci_sched_acl_pkt(hdev);
3161 break;
3162
3163 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3164 hci_sched_acl_blk(hdev);
3165 break;
3166 }
3167}
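
/* The two flavours match the controller's flow control mode: packet-based
 * controllers report free packet slots (hdev->acl_cnt), while block-based
 * controllers, typically AMP, report free data blocks (hdev->block_cnt).
 */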

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
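
/* LE credit note: controllers without a dedicated LE buffer pool
 * (hdev->le_pkts == 0) borrow from the ACL pool, which is why the
 * leftover count is written back to either le_cnt or acl_cnt above.
 */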

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
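
/* Handle field layout (per the Bluetooth Core spec): the 16-bit value in
 * the ACL header packs a 12-bit connection handle plus the 2-bit packet
 * boundary and 2-bit broadcast flags.  For a raw value of 0x2001,
 * hci_handle() yields 0x001 and hci_flags() yields 0x2.
 */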

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

static bool hci_req_is_complete(struct hci_dev *hdev)
{
	struct sk_buff *skb;

	skb = skb_peek(&hdev->cmd_q);
	if (!skb)
		return true;

	return bt_cb(skb)->req.start;
}
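
/* A request is a batch of commands queued together; the first command of
 * each batch has bt_cb(skb)->req.start set.  If the head of cmd_q starts a
 * new batch (or the queue is empty), the previous request has fully left
 * the queue and is therefore complete.
 */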

static void hci_resend_last(struct hci_dev *hdev)
{
	struct hci_command_hdr *sent;
	struct sk_buff *skb;
	u16 opcode;

	if (!hdev->sent_cmd)
		return;

	sent = (void *) hdev->sent_cmd->data;
	opcode = __le16_to_cpu(sent->opcode);
	if (opcode == HCI_OP_RESET)
		return;

	skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
	if (!skb)
		return;

	skb_queue_head(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there are still more commands in
	 * this request, the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
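
/* Failure walk-through (hypothetical request): for commands A -> B -> C
 * queued as one request, a failing B short-circuits the completion checks
 * above, and the drain loop discards the still-queued C, stopping at the
 * next req.start boundary.  The complete callback, which is typically
 * attached to the request's final command, is picked up along the way and
 * invoked with the failing status.
 */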

static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}

static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
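
/* cmd_cnt is the controller's command credit: it is decremented for every
 * command put on the wire here and replenished when the controller reports
 * available command slots in Command Complete/Status events, bounding the
 * number of outstanding commands.
 */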

int hci_do_inquiry(struct hci_dev *hdev, u8 length)
{
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return -EINPROGRESS;

	inquiry_cache_flush(hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
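
/* The LAP above is the General Inquiry Access Code 0x9E8B33 in
 * little-endian byte order, which solicits responses from all
 * discoverable devices in range.  The length parameter is in units of
 * 1.28 s, so hci_do_inquiry(hdev, 0x08) asks for a 10.24-second inquiry.
 */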

int hci_cancel_inquiry(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_INQUIRY, &hdev->flags))
		return -EALREADY;

	return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
}
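
/* Address-type mapping: translates the BDADDR_LE_* types used towards
 * userspace into the ADDR_LE_DEV_* constants used at the HCI level;
 * anything that is not explicitly public is treated as a random address.
 */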
u8 bdaddr_to_le(u8 bdaddr_type)
{
	switch (bdaddr_type) {
	case BDADDR_LE_PUBLIC:
		return ADDR_LE_DEV_PUBLIC;

	default:
		/* Fallback to LE Random address type */
		return ADDR_LE_DEV_RANDOM;
	}
}