/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase, check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*req)(struct hci_dev *hdev, unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);

	/* If the request didn't send any commands, return immediately */
	if (skb_queue_empty(&hdev->cmd_q) && atomic_read(&hdev->cmd_cnt)) {
		hdev->req_status = 0;
		remove_wait_queue(&hdev->req_wait_q, &wait);
		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

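/* Illustrative usage sketch (not part of the original file): request
 * callbacks such as hci_reset_req() below only queue HCI commands;
 * hci_req_sync() runs one callback and blocks until hci_req_complete()
 * reports the result or the timeout fires. A hypothetical caller:
 *
 *	static int example_reset_sync(struct hci_dev *hdev)
 *	{
 *		return hci_req_sync(hdev, hci_reset_req, 0,
 *				    HCI_INIT_TIMEOUT);
 *	}
 *
 * The opt argument is handed through unchanged to the callback, which
 * is how hci_scan_req() and friends receive their parameter.
 */
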
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Block size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

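/* For a BR/EDR controller without the HCI_QUIRK_RESET_ON_CLOSE quirk,
 * the request above therefore queues HCI_OP_RESET first, followed by
 * the Read Local Supported Features and Read Local Version commands
 * from bredr_init(). AMP controllers instead get block-based flow
 * control and the AMP info and data block size reads from amp_init().
 */
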
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

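/* Illustrative sketch (not part of the original file): the reference
 * taken by hci_dev_get() must always be dropped with hci_dev_put(), as
 * every ioctl helper below does. A hypothetical caller:
 *
 *	struct hci_dev *hdev = hci_dev_get(0);
 *
 *	if (hdev) {
 *		BT_DBG("%s is %s", hdev->name,
 *		       test_bit(HCI_UP, &hdev->flags) ? "up" : "down");
 *		hci_dev_put(hdev);
 *	}
 */
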
/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

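/* A discovery cycle normally walks the states as
 *
 *	DISCOVERY_STOPPED -> DISCOVERY_STARTING -> DISCOVERY_FINDING ->
 *	DISCOVERY_RESOLVING -> DISCOVERY_STOPPING -> DISCOVERY_STOPPED
 *
 * and, as the switch above implements, mgmt is only notified when
 * finding starts and when discovery fully stops (unless it never left
 * the starting state).
 */
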
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

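/* The discovery cache thus keeps three views of the same entries:
 * everything lives on cache->all, entries whose remote name is still
 * missing also sit on cache->unknown, and entries queued for name
 * resolution sit on cache->resolve, ordered by signal strength in
 * hci_inquiry_cache_update_resolve() above. The return value of
 * hci_inquiry_cache_update() tells the caller whether the remote name
 * is already known.
 */
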
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

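/* Userspace sketch (illustrative assumption, not part of this file):
 * hci_inquiry() backs the HCIINQUIRY ioctl on an HCI socket. With the
 * usual userspace Bluetooth headers, a caller would lay out the request
 * followed by room for the responses roughly like this:
 *
 *	__u8 buf[sizeof(struct hci_inquiry_req) +
 *		 255 * sizeof(struct inquiry_info)];
 *	struct hci_inquiry_req *ir = (void *) buf;
 *
 *	ir->dev_id  = 0;			// hci0
 *	ir->flags   = IREQ_CACHE_FLUSH;		// force a fresh inquiry
 *	ir->lap[0]  = 0x33;			// GIAC 0x9e8b33,
 *	ir->lap[1]  = 0x8b;			// little endian
 *	ir->lap[2]  = 0x9e;
 *	ir->length  = 8;			// 8 * 1.28 seconds
 *	ir->num_rsp = 0;			// 0 = up to 255 responses
 *
 *	ioctl(dd, HCIINQUIRY, buf);		// dd is an open HCI socket
 */
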
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

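/* Each block emitted by create_ad() is a [length, type, value] triplet
 * as defined for EIR/AD data, where the length byte counts the type
 * byte plus the value. For a controller that is general discoverable,
 * BR/EDR incapable, advertises 0 dBm tx power and is named "hci0", the
 * buffer would contain (illustrative values):
 *
 *	02 01 06		flags: LE_AD_GENERAL | LE_AD_NO_BREDR
 *	02 0a 00		tx power level: 0 dBm
 *	05 09 68 63 69 30	complete local name: "hci0"
 */
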
int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	 * enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_req_sync(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

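/* Userspace sketch (illustrative assumption, not part of this file):
 * the HCISETSCAN case above is what a tool like "hciconfig hci0 piscan"
 * ends up invoking. With the usual userspace Bluetooth headers:
 *
 *	struct hci_dev_req dr;
 *
 *	dr.dev_id  = 0;				// hci0
 *	dr.dev_opt = SCAN_PAGE | SCAN_INQUIRY;	// connectable and
 *						// discoverable
 *	ioctl(dd, HCISETSCAN, &dr);		// dd is an open HCI socket
 *
 * dev_opt then arrives as the opt argument of hci_scan_req() via
 * hci_req_sync().
 */
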
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct bt_uuid *uuid, *tmp;

	list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
		list_del(&uuid->list);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

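/* Summarizing the checks above: legacy keys and security mode 3 keys
 * (no connection) are always stored, debug keys never are, and a
 * combination key is kept only when both sides asked for some form of
 * bonding or at least one side used dedicated bonding.
 */
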
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

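/* Remote out-of-band (OOB) pairing data. Each entry caches the hash
 * and randomizer received over an OOB channel for one remote address
 * and lives on hdev->remote_oob_data until removed or cleared.
 */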
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
					  bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}

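/* Device blacklist. BDADDR_ANY is rejected on add, and on delete it
 * means "clear the whole list". Changes are propagated to userspace
 * through mgmt_device_blocked()/mgmt_device_unblocked().
 */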
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}

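/* LE scanning is driven through the synchronous HCI request machinery:
 * scan parameters are programmed first (le_scan_param_req), scanning is
 * then enabled (le_scan_enable_req), and a delayed work item
 * (le_scan_disable_work) turns it off again once the timeout expires.
 */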
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}

int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}

/* Alloc HCI device */
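/* This only sets up in-memory state: default packet types, the various
 * key/blacklist lists, the rx/tx/cmd work items and the command timer.
 * The controller itself is not touched until the driver registers the
 * device and brings it up.
 */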
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
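/* Typical driver bring-up, as a sketch (my_open, my_close and my_send
 * are illustrative driver callbacks, not names from this file):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->open  = my_open;		(mandatory, checked below)
 *	hdev->close = my_close;		(mandatory, checked below)
 *	hdev->send  = my_send;		(used by hci_send_frame())
 *	err = hci_register_dev(hdev);
 *
 * Registration allocates the hciN index, creates the work queues and
 * sysfs entries and schedules power_on; on failure everything is
 * unwound and a negative errno is returned instead of the new index.
 */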
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
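/* Drivers hand completed packets to the core here. The skb is expected
 * to carry the originating hci_dev in skb->dev and the packet type in
 * bt_cb(skb)->pkt_type, e.g. (as hci_reassembly() below does):
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 *
 * Delivery is asynchronous: the skb is queued on rx_q and processed by
 * hci_rx_work on the per-device workqueue.
 */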
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
		      !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

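/* Reassemble a packet arriving from the driver in arbitrary chunks.
 * Per-type state lives in hdev->reassembly[index]: a partial skb plus
 * the number of bytes still expected (scb->expect). Once a header is
 * complete, the payload length is taken from it; when expect drops to
 * zero the finished frame is handed to hci_recv_frame(). Returns the
 * number of input bytes not yet consumed, or a negative errno.
 */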
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}

/* Send HCI command */
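/* Commands are not transmitted inline: they are queued on cmd_q and
 * drained by hci_cmd_work, which enforces the one-outstanding-command
 * rule. A typical caller (taken from le_scan_param_req above):
 *
 *	struct hci_cp_le_set_scan_param cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	cp.type = param->type;
 *	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
 */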
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}

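/* Queue an ACL frame for transmission, fragmenting if necessary. The
 * head fragment keeps the caller's flags (ACL_START); any fragments
 * hanging off skb_shinfo(skb)->frag_list are re-tagged ACL_CONT and
 * appended to the channel queue under the queue lock so the sequence
 * cannot be interleaved with another writer.
 */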
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list;
			list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}

void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
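/* hci_low_sent() picks, for one link type, the connection with pending
 * data and the fewest packets in flight (c->sent), then computes a
 * per-run quota from the controller's free buffer count. This keeps a
 * single busy connection from starving the others.
 */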
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}

static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}

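/* Channel-level variant of the scheduler. Channels are first filtered
 * by the priority of the skb at the head of their queue: only channels
 * at the highest pending priority compete, and among those the one on
 * the connection with the least in-flight data wins. The quota comes
 * from the free buffer count for the link type; AMP links are counted
 * in data blocks rather than packets.
 */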
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}

static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}

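/* Block-based flow control (used by AMP controllers) accounts buffers
 * in fixed-size data blocks instead of whole packets, so one packet
 * costs DIV_ROUND_UP(payload, block_len) credits.
 */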
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}

static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
{
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!cnt && time_after(jiffies, hdev->acl_last_tx +
				       HCI_ACL_TX_TIMEOUT))
			hci_link_tx_to(hdev, ACL_LINK);
	}
}

static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}

static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}

static void hci_sched_acl(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	/* No ACL link over BR/EDR controller */
	if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
		return;

	/* No AMP link over AMP controller */
	if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
		return;

	switch (hdev->flow_ctl_mode) {
	case HCI_FLOW_CTL_MODE_PACKET_BASED:
		hci_sched_acl_pkt(hdev);
		break;

	case HCI_FLOW_CTL_MODE_BLOCK_BASED:
		hci_sched_acl_blk(hdev);
		break;
	}
}

/* Schedule SCO */
static void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
						     &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

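/* LE scheduling. Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL credits, so whichever counter was
 * consumed is written back at the end of the run.
 */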
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}

static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
	       hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

/* SCO data packet */
static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		/* Send to upper protocol */
		sco_recv_scodata(conn, skb);
		return;
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}

Marcel Holtmannb78752c2010-08-08 23:06:53 -04002872static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002873{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002874 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002875 struct sk_buff *skb;
2876
2877 BT_DBG("%s", hdev->name);
2878
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002880 /* Send copy to monitor */
2881 hci_send_to_monitor(hdev, skb);
2882
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883 if (atomic_read(&hdev->promisc)) {
2884 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002885 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886 }
2887
2888 if (test_bit(HCI_RAW, &hdev->flags)) {
2889 kfree_skb(skb);
2890 continue;
2891 }
2892
2893 if (test_bit(HCI_INIT, &hdev->flags)) {
2894			/* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002895 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 case HCI_ACLDATA_PKT:
2897 case HCI_SCODATA_PKT:
2898 kfree_skb(skb);
2899 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002900 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002901 }
2902
2903 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002904 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002906 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002907 hci_event_packet(hdev, skb);
2908 break;
2909
2910 case HCI_ACLDATA_PKT:
2911 BT_DBG("%s ACL data packet", hdev->name);
2912 hci_acldata_packet(hdev, skb);
2913 break;
2914
2915 case HCI_SCODATA_PKT:
2916 BT_DBG("%s SCO data packet", hdev->name);
2917 hci_scodata_packet(hdev, skb);
2918 break;
2919
2920 default:
2921 kfree_skb(skb);
2922 break;
2923 }
2924 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002925}
2926
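/*
 * Editor's note: a minimal sketch of the driver side of the RX path,
 * assuming the hci_recv_frame() entry point of this era, which queues
 * the skb on hdev->rx_q and schedules rx_work (drained by hci_rx_work()
 * above).  The function below is hypothetical, not part of this file.
 */
static int example_driver_rx(struct hci_dev *hdev, void *data, int count)
{
	struct sk_buff *skb = bt_skb_alloc(count, GFP_ATOMIC);

	if (!skb)
		return -ENOMEM;

	memcpy(skb_put(skb, count), data, count);
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;	/* as reported by the hardware */
	skb->dev = (void *) hdev;

	return hci_recv_frame(skb);
}
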
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002927static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002929 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930 struct sk_buff *skb;
2931
Andrei Emeltchenko21047862012-07-10 15:27:47 +03002932 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2933 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002936 if (atomic_read(&hdev->cmd_cnt)) {
2937 skb = skb_dequeue(&hdev->cmd_q);
2938 if (!skb)
2939 return;
2940
Wei Yongjun7585b972009-02-25 18:29:52 +08002941 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002942
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002943 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2944 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002945 atomic_dec(&hdev->cmd_cnt);
2946 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002947 if (test_bit(HCI_RESET, &hdev->flags))
2948 del_timer(&hdev->cmd_timer);
2949 else
2950 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002951 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002952 } else {
2953 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002954 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955 }
2956 }
2957}
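
/*
 * Editor's note: a minimal sketch of how commands enter the queue that
 * hci_cmd_work() drains.  hci_send_cmd() builds the skb, queues it on
 * hdev->cmd_q and schedules cmd_work; cmd_cnt is replenished when the
 * controller acknowledges the command.  The wrapper name below is
 * hypothetical.
 */
static int example_send_reset(struct hci_dev *hdev)
{
	return hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}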
Andre Guedes2519a1f2011-11-07 11:45:24 -03002958
2959int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2960{
2961 /* General inquiry access code (GIAC) */
2962 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2963 struct hci_cp_inquiry cp;
2964
2965 BT_DBG("%s", hdev->name);
2966
2967 if (test_bit(HCI_INQUIRY, &hdev->flags))
2968 return -EINPROGRESS;
2969
Johan Hedberg46632622012-01-02 16:06:08 +02002970 inquiry_cache_flush(hdev);
2971
Andre Guedes2519a1f2011-11-07 11:45:24 -03002972 memset(&cp, 0, sizeof(cp));
2973 memcpy(&cp.lap, lap, sizeof(cp.lap));
2974 cp.length = length;
2975
2976 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2977}
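
/*
 * Editor's note: a minimal usage sketch for hci_do_inquiry(), which is
 * typically driven from the management interface when discovery starts.
 * The function name is hypothetical; 0x08 means 8 * 1.28s = 10.24s of
 * inquiry, the length commonly used for BR/EDR discovery.
 */
static int example_start_discovery(struct hci_dev *hdev)
{
	int err = hci_do_inquiry(hdev, 0x08);

	if (err == -EINPROGRESS)
		BT_DBG("%s inquiry already in progress", hdev->name);

	return err;
}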
Andre Guedes023d50492011-11-04 14:16:52 -03002978
2979int hci_cancel_inquiry(struct hci_dev *hdev)
2980{
2981 BT_DBG("%s", hdev->name);
2982
2983 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002984 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002985
2986 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2987}
Andre Guedes31f79562012-04-24 21:02:53 -03002988
2989u8 bdaddr_to_le(u8 bdaddr_type)
2990{
2991 switch (bdaddr_type) {
2992 case BDADDR_LE_PUBLIC:
2993 return ADDR_LE_DEV_PUBLIC;
2994
2995 default:
2996		/* Fall back to LE Random address type */
2997 return ADDR_LE_DEV_RANDOM;
2998 }
2999}
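
/*
 * Editor's note: a one-line usage sketch for the mapping above, which
 * translates the mgmt-layer address types (BDADDR_LE_*) into the
 * hci-layer constants (ADDR_LE_DEV_*); anything unrecognized falls
 * back to the random type:
 *
 *	u8 addr_type = bdaddr_to_le(BDADDR_LE_RANDOM);
 *	 => ADDR_LE_DEV_RANDOM
 */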