blob: 59ec99eb739b2d403ce2a25630d50d195c9b1263 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur82453022008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Marcel Holtmannb78752c2010-08-08 23:06:53 -040057static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020058static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020059static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Linus Torvalds1da177e2005-04-16 15:20:36 -070061/* HCI device list */
62LIST_HEAD(hci_dev_list);
63DEFINE_RWLOCK(hci_dev_list_lock);
64
65/* HCI callback list */
66LIST_HEAD(hci_cb_list);
67DEFINE_RWLOCK(hci_cb_list_lock);
68
Linus Torvalds1da177e2005-04-16 15:20:36 -070069/* ---- HCI notifications ---- */
70
/* Forward a HCI device event (HCI_DEV_REG, HCI_DEV_UP, ...) to the
 * HCI socket layer so listening sockets are informed. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75
76/* ---- HCI requests ---- */
77
Johan Hedberg23bb5762010-12-21 23:01:27 +020078void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070079{
Johan Hedberg23bb5762010-12-21 23:01:27 +020080 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
Johan Hedberga5040ef2011-01-10 13:28:59 +020082 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
84 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020085 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
86 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
87 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
96 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
97 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +0200106 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
/* Execute a request callback and sleep until it completes, is canceled,
 * or 'timeout' jiffies elapse. Caller must hold the request lock.
 *
 * Returns 0 on success, a negative errno mapped from the controller
 * status on failure, -EINTR on signal, or -ETIMEDOUT.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue and switch to TASK_INTERRUPTIBLE
	 * BEFORE issuing the request, so a completion that arrives
	 * immediately after req() cannot be lost. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* Woken by a signal rather than by completion. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code -> negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno here. */
		err = -hdev->req_result;
		break;

	default:
		/* Neither completed nor canceled within the timeout. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100170 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171{
172 int ret;
173
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
185static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186{
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300190 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192}
193
/* Queue the init command sequence for a BR/EDR controller. Commands
 * are sent asynchronously; completion is handled by event processing. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that cannot handle it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys: wildcard address + delete_all flag. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
245
/* Queue the minimal init sequence for an AMP controller. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
256
257static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
258{
259 struct sk_buff *skb;
260
261 BT_DBG("%s %ld", hdev->name, opt);
262
263 /* Driver initialization */
264
265 /* Special commands */
266 while ((skb = skb_dequeue(&hdev->driver_init))) {
267 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
268 skb->dev = (void *) hdev;
269
270 skb_queue_tail(&hdev->cmd_q, skb);
271 queue_work(hdev->workqueue, &hdev->cmd_work);
272 }
273 skb_queue_purge(&hdev->driver_init);
274
275 switch (hdev->dev_type) {
276 case HCI_BREDR:
277 bredr_init(hdev);
278 break;
279
280 case HCI_AMP:
281 amp_init(hdev);
282 break;
283
284 default:
285 BT_ERR("Unknown device type %d", hdev->dev_type);
286 break;
287 }
288
289}
290
/* Request callback for LE-specific init. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
298
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 scan = opt;
302
303 BT_DBG("%s %x", hdev->name, scan);
304
305 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
309static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 auth = opt;
312
313 BT_DBG("%s %x", hdev->name, auth);
314
315 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
319static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 encrypt = opt;
322
323 BT_DBG("%s %x", hdev->name, encrypt);
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327}
328
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200329static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __le16 policy = cpu_to_le16(opt);
332
Marcel Holtmanna418b892008-11-30 12:17:28 +0100333 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200334
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337}
338
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900339/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 * Device is held on return. */
341struct hci_dev *hci_dev_get(int index)
342{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200343 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344
345 BT_DBG("%d", index);
346
347 if (index < 0)
348 return NULL;
349
350 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200351 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
354 break;
355 }
356 }
357 read_unlock(&hci_dev_list_lock);
358 return hdev;
359}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360
361/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200362
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200363bool hci_discovery_active(struct hci_dev *hdev)
364{
365 struct discovery_state *discov = &hdev->discovery;
366
Andre Guedes6fbe1952012-02-03 17:47:58 -0300367 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300368 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300369 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200370 return true;
371
Andre Guedes6fbe1952012-02-03 17:47:58 -0300372 default:
373 return false;
374 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200375}
376
Johan Hedbergff9ef572012-01-04 14:23:45 +0200377void hci_discovery_set_state(struct hci_dev *hdev, int state)
378{
379 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380
381 if (hdev->discovery.state == state)
382 return;
383
384 switch (state) {
385 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300386 if (hdev->discovery.state != DISCOVERY_STARTING)
387 mgmt_discovering(hdev, 0);
Johan Hedbergf963e8e2012-02-20 23:30:44 +0200388 hdev->discovery.type = 0;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200389 break;
390 case DISCOVERY_STARTING:
391 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300392 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200393 mgmt_discovering(hdev, 1);
394 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200395 case DISCOVERY_RESOLVING:
396 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200397 case DISCOVERY_STOPPING:
398 break;
399 }
400
401 hdev->discovery.state = state;
402}
403
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404static void inquiry_cache_flush(struct hci_dev *hdev)
405{
Johan Hedberg30883512012-01-04 14:16:21 +0200406 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200407 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408
Johan Hedberg561aafb2012-01-04 13:31:59 +0200409 list_for_each_entry_safe(p, n, &cache->all, all) {
410 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200411 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200413
414 INIT_LIST_HEAD(&cache->unknown);
415 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416}
417
418struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
419{
Johan Hedberg30883512012-01-04 14:16:21 +0200420 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 struct inquiry_entry *e;
422
423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
Johan Hedberg561aafb2012-01-04 13:31:59 +0200425 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200427 return e;
428 }
429
430 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431}
432
Johan Hedberg561aafb2012-01-04 13:31:59 +0200433struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300434 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200435{
Johan Hedberg30883512012-01-04 14:16:21 +0200436 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440
441 list_for_each_entry(e, &cache->unknown, list) {
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447}
448
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200449struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300450 bdaddr_t *bdaddr,
451 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200452{
453 struct discovery_state *cache = &hdev->discovery;
454 struct inquiry_entry *e;
455
456 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
457
458 list_for_each_entry(e, &cache->resolve, list) {
459 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
460 return e;
461 if (!bacmp(&e->data.bdaddr, bdaddr))
462 return e;
463 }
464
465 return NULL;
466}
467
/* Re-insert 'ie' into the resolve list at its RSSI-ordered position
 * (smaller |rssi| — i.e. stronger signal — first), so the strongest
 * devices get their names resolved first. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
						struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until we hit an entry that ranks at or below 'ie';
	 * entries whose name request is already pending keep their
	 * place regardless of signal strength. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	/* Insert after the last entry that outranked 'ie'. */
	list_add(&ie->list, pos);
}
486
/* Add a new inquiry result to the cache, or refresh an existing entry.
 *
 * @name_known: whether the remote name is already known (e.g. from EIR)
 * @ssp:        out parameter (may be NULL); set to whether the device
 *              reported Simple Secure Pairing support
 *
 * Returns true when no name resolution is needed for this entry,
 * false when the name is still unknown (or allocation failed).
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
						bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support seen in any earlier response sticks. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Keep the resolve list ordered when the signal
		 * strength of a to-be-resolved entry changes. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: promote the entry and drop it from
	 * the unknown/resolve list it was queued on (unless a name
	 * request is already in flight). */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
542
543static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
544{
Johan Hedberg30883512012-01-04 14:16:21 +0200545 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 struct inquiry_info *info = (struct inquiry_info *) buf;
547 struct inquiry_entry *e;
548 int copied = 0;
549
Johan Hedberg561aafb2012-01-04 13:31:59 +0200550 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200552
553 if (copied >= num)
554 break;
555
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556 bacpy(&info->bdaddr, &data->bdaddr);
557 info->pscan_rep_mode = data->pscan_rep_mode;
558 info->pscan_period_mode = data->pscan_period_mode;
559 info->pscan_mode = data->pscan_mode;
560 memcpy(info->dev_class, data->dev_class, 3);
561 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200562
Linus Torvalds1da177e2005-04-16 15:20:36 -0700563 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200564 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565 }
566
567 BT_DBG("cache %p, copied %d", cache, copied);
568 return copied;
569}
570
571static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
572{
573 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
574 struct hci_cp_inquiry cp;
575
576 BT_DBG("%s", hdev->name);
577
578 if (test_bit(HCI_INQUIRY, &hdev->flags))
579 return;
580
581 /* Start Inquiry */
582 memcpy(&cp.lap, &ir->lap, 3);
583 cp.length = ir->length;
584 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200585 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586}
587
/* ioctl(HCIINQUIRY) handler: run an inquiry (unless the cache is fresh
 * and the caller did not request a flush) and copy the cached results
 * back to user space.
 *
 * Returns 0 on success, -EFAULT/-ENODEV/-ENOMEM or a request error.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only re-inquire when the cache is stale, empty, or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* 2000 ms of request timeout per unit of inquiry length. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy the updated request header back first, then the
	 * inquiry_info array right after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
653
654/* ---- HCI ioctl helpers ---- */
655
/* Power on and initialize a HCI device (HCIDEVUP ioctl path).
 *
 * Returns 0 on success; -ENODEV, -ERFKILL, -EALREADY, -EIO or a
 * negative init-request error on failure. On init failure the driver
 * is cleanly closed again.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to bring the device up while rfkill-blocked. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Let the driver bring up its transport. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Run the synchronous HCI init sequence (plus the LE one when
	 * the host supports LE), unless the device is raw. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't report "powered" while mgmt setup is pending. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
742
/* Tear down a HCI device: cancel pending work, flush queues and
 * connections, optionally reset the controller, and close the driver.
 * Safe to call on a device that is already down (returns 0). */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	/* Abort any in-flight synchronous request before taking the
	 * request lock ourselves. */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Stop a running discoverable timeout and clear the flag. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
						msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* When auto-off already reported power-down, don't repeat it. */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
830
831int hci_dev_close(__u16 dev)
832{
833 struct hci_dev *hdev;
834 int err;
835
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200836 hdev = hci_dev_get(dev);
837 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100839
840 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
841 cancel_delayed_work(&hdev->power_off);
842
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100844
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845 hci_dev_put(hdev);
846 return err;
847}
848
/* Reset a running adapter: drop all queued traffic, flush the inquiry
 * and connection caches, and (unless in raw mode) issue an HCI Reset
 * command. A device that is not HCI_UP is left untouched.
 *
 * Returns 0 on success, -ENODEV for an unknown id, or the result of
 * the reset request.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Caches are protected by the device lock, not the req lock */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own internal queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again and zero the flow-control
	 * counters for all three link types */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
887
888int hci_dev_reset_stat(__u16 dev)
889{
890 struct hci_dev *hdev;
891 int ret = 0;
892
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200893 hdev = hci_dev_get(dev);
894 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700895 return -ENODEV;
896
897 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
898
899 hci_dev_put(hdev);
900
901 return ret;
902}
903
/* Handle the HCISET* ioctls: copy the request from userspace, look up
 * the target device and either run a synchronous HCI request or poke
 * the corresponding hdev field directly.
 *
 * @cmd: HCISET* ioctl number
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	/* For the MTU ioctls dev_opt carries two packed 16-bit halves:
	 * the half at offset +1 is the MTU, the half at +0 the packet
	 * count. Extraction is via pointer arithmetic on the in-memory
	 * representation, so the packing is host-byte-order dependent. */
	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
978
/* HCIGETDEVLIST ioctl: return (id, flags) pairs for up to dev_num
 * registered adapters to userspace.
 *
 * @arg: userspace pointer to a struct hci_dev_list_req; dev_num is
 *       read first, then the same buffer is filled with the results.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation to two pages' worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing devices counts as userspace activity: keep an
		 * auto-off device from powering itself back down */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1025
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * adapter and copy it back to userspace.
 *
 * @arg: userspace pointer to a struct hci_dev_info; dev_id is read
 *       from it first and the same buffer receives the result.
 *
 * Returns 0 on success, -EFAULT or -ENODEV on failure.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as activity: cancel a pending
	 * auto power-off (synchronously, unlike hci_get_dev_list) */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1067
1068/* ---- Interface to HCI drivers ---- */
1069
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001070static int hci_rfkill_set_block(void *data, bool blocked)
1071{
1072 struct hci_dev *hdev = data;
1073
1074 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1075
1076 if (!blocked)
1077 return 0;
1078
1079 hci_dev_do_close(hdev);
1080
1081 return 0;
1082}
1083
/* rfkill integration: only the block operation needs handling */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1087
Linus Torvalds1da177e2005-04-16 15:20:36 -07001088/* Alloc HCI device */
1089struct hci_dev *hci_alloc_dev(void)
1090{
1091 struct hci_dev *hdev;
1092
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001093 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001094 if (!hdev)
1095 return NULL;
1096
David Herrmann0ac7e702011-10-08 14:58:47 +02001097 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098 skb_queue_head_init(&hdev->driver_init);
1099
1100 return hdev;
1101}
1102EXPORT_SYMBOL(hci_alloc_dev);
1103
/* Free HCI device */
/* Drop the driver's reference to an hci_dev allocated with
 * hci_alloc_dev(). Any skbs still queued by the driver are purged;
 * the memory itself is released by the device core's release hook
 * once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1113
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001114static void hci_power_on(struct work_struct *work)
1115{
1116 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1117
1118 BT_DBG("%s", hdev->name);
1119
1120 if (hci_dev_open(hdev->id) < 0)
1121 return;
1122
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001123 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Gustavo F. Padovan80b7ab32011-12-17 14:52:27 -02001124 schedule_delayed_work(&hdev->power_off,
Johan Hedberg32435532011-11-07 22:16:04 +02001125 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001126
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001127 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001128 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001129}
1130
/* Deferred power-off work: simply close the device. Scheduled from
 * hci_power_on() for auto-powered devices and from the management
 * interface.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1140
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001141static void hci_discov_off(struct work_struct *work)
1142{
1143 struct hci_dev *hdev;
1144 u8 scan = SCAN_PAGE;
1145
1146 hdev = container_of(work, struct hci_dev, discov_off.work);
1147
1148 BT_DBG("%s", hdev->name);
1149
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001150 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001151
1152 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1153
1154 hdev->discov_timeout = 0;
1155
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001156 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001157}
1158
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001159int hci_uuids_clear(struct hci_dev *hdev)
1160{
1161 struct list_head *p, *n;
1162
1163 list_for_each_safe(p, n, &hdev->uuids) {
1164 struct bt_uuid *uuid;
1165
1166 uuid = list_entry(p, struct bt_uuid, list);
1167
1168 list_del(p);
1169 kfree(uuid);
1170 }
1171
1172 return 0;
1173}
1174
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001175int hci_link_keys_clear(struct hci_dev *hdev)
1176{
1177 struct list_head *p, *n;
1178
1179 list_for_each_safe(p, n, &hdev->link_keys) {
1180 struct link_key *key;
1181
1182 key = list_entry(p, struct link_key, list);
1183
1184 list_del(p);
1185 kfree(key);
1186 }
1187
1188 return 0;
1189}
1190
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001191int hci_smp_ltks_clear(struct hci_dev *hdev)
1192{
1193 struct smp_ltk *k, *tmp;
1194
1195 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1196 list_del(&k->list);
1197 kfree(k);
1198 }
1199
1200 return 0;
1201}
1202
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001203struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1204{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001205 struct link_key *k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001206
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001207 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001208 if (bacmp(bdaddr, &k->bdaddr) == 0)
1209 return k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001210
1211 return NULL;
1212}
1213
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001214static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1215 u8 key_type, u8 old_key_type)
1216{
1217 /* Legacy key */
1218 if (key_type < 0x03)
1219 return 1;
1220
1221 /* Debug keys are insecure so don't store them persistently */
1222 if (key_type == HCI_LK_DEBUG_COMBINATION)
1223 return 0;
1224
1225 /* Changed combination key and there's no previous one */
1226 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1227 return 0;
1228
1229 /* Security mode 3 case */
1230 if (!conn)
1231 return 1;
1232
1233 /* Neither local nor remote side had no-bonding as requirement */
1234 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1235 return 1;
1236
1237 /* Local side had dedicated bonding as requirement */
1238 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1239 return 1;
1240
1241 /* Remote side had dedicated bonding as requirement */
1242 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1243 return 1;
1244
1245 /* If none of the above criteria match, then don't store the key
1246 * persistently */
1247 return 0;
1248}
1249
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001250struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001251{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001252 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001253
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001254 list_for_each_entry(k, &hdev->long_term_keys, list) {
1255 if (k->ediv != ediv ||
1256 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001257 continue;
1258
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001259 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001260 }
1261
1262 return NULL;
1263}
1264EXPORT_SYMBOL(hci_find_ltk);
1265
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001266struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001267 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001268{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001269 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001270
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001271 list_for_each_entry(k, &hdev->long_term_keys, list)
1272 if (addr_type == k->bdaddr_type &&
1273 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001274 return k;
1275
1276 return NULL;
1277}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001278EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001279
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001280int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001281 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001282{
1283 struct link_key *key, *old_key;
Johan Hedberg4df378a2011-04-28 11:29:03 -07001284 u8 old_key_type, persistent;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001285
1286 old_key = hci_find_link_key(hdev, bdaddr);
1287 if (old_key) {
1288 old_key_type = old_key->type;
1289 key = old_key;
1290 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001291 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001292 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1293 if (!key)
1294 return -ENOMEM;
1295 list_add(&key->list, &hdev->link_keys);
1296 }
1297
1298 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1299
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001300 /* Some buggy controller combinations generate a changed
1301 * combination key for legacy pairing even when there's no
1302 * previous key */
1303 if (type == HCI_LK_CHANGED_COMBINATION &&
1304 (!conn || conn->remote_auth == 0xff) &&
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001305 old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001306 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001307 if (conn)
1308 conn->key_type = type;
1309 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001310
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001311 bacpy(&key->bdaddr, bdaddr);
1312 memcpy(key->val, val, 16);
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001313 key->pin_len = pin_len;
1314
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001315 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001316 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001317 else
1318 key->type = type;
1319
Johan Hedberg4df378a2011-04-28 11:29:03 -07001320 if (!new_key)
1321 return 0;
1322
1323 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1324
Johan Hedberg744cf192011-11-08 20:40:14 +02001325 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001326
1327 if (!persistent) {
1328 list_del(&key->list);
1329 kfree(key);
1330 }
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001331
1332 return 0;
1333}
1334
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001335int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001336 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
1337 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001338{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001339 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001340
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001341 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1342 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001343
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001344 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1345 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001346 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001347 else {
1348 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001349 if (!key)
1350 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001351 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001352 }
1353
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001354 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001355 key->bdaddr_type = addr_type;
1356 memcpy(key->val, tk, sizeof(key->val));
1357 key->authenticated = authenticated;
1358 key->ediv = ediv;
1359 key->enc_size = enc_size;
1360 key->type = type;
1361 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001362
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001363 if (!new_key)
1364 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001365
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001366 if (type & HCI_SMP_LTK)
1367 mgmt_new_ltk(hdev, key, 1);
1368
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001369 return 0;
1370}
1371
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001372int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1373{
1374 struct link_key *key;
1375
1376 key = hci_find_link_key(hdev, bdaddr);
1377 if (!key)
1378 return -ENOENT;
1379
1380 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1381
1382 list_del(&key->list);
1383 kfree(key);
1384
1385 return 0;
1386}
1387
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001388int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1389{
1390 struct smp_ltk *k, *tmp;
1391
1392 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1393 if (bacmp(bdaddr, &k->bdaddr))
1394 continue;
1395
1396 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1397
1398 list_del(&k->list);
1399 kfree(k);
1400 }
1401
1402 return 0;
1403}
1404
Ville Tervo6bd32322011-02-16 16:32:41 +02001405/* HCI command timer function */
1406static void hci_cmd_timer(unsigned long arg)
1407{
1408 struct hci_dev *hdev = (void *) arg;
1409
1410 BT_ERR("%s command tx timeout", hdev->name);
1411 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001412 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001413}
1414
Szymon Janc2763eda2011-03-22 13:12:22 +01001415struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001416 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001417{
1418 struct oob_data *data;
1419
1420 list_for_each_entry(data, &hdev->remote_oob_data, list)
1421 if (bacmp(bdaddr, &data->bdaddr) == 0)
1422 return data;
1423
1424 return NULL;
1425}
1426
1427int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1428{
1429 struct oob_data *data;
1430
1431 data = hci_find_remote_oob_data(hdev, bdaddr);
1432 if (!data)
1433 return -ENOENT;
1434
1435 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1436
1437 list_del(&data->list);
1438 kfree(data);
1439
1440 return 0;
1441}
1442
1443int hci_remote_oob_data_clear(struct hci_dev *hdev)
1444{
1445 struct oob_data *data, *n;
1446
1447 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1448 list_del(&data->list);
1449 kfree(data);
1450 }
1451
1452 return 0;
1453}
1454
1455int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001456 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001457{
1458 struct oob_data *data;
1459
1460 data = hci_find_remote_oob_data(hdev, bdaddr);
1461
1462 if (!data) {
1463 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1464 if (!data)
1465 return -ENOMEM;
1466
1467 bacpy(&data->bdaddr, bdaddr);
1468 list_add(&data->list, &hdev->remote_oob_data);
1469 }
1470
1471 memcpy(data->hash, hash, sizeof(data->hash));
1472 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1473
1474 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1475
1476 return 0;
1477}
1478
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001479struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001480{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001481 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001482
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001483 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001484 if (bacmp(bdaddr, &b->bdaddr) == 0)
1485 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001486
1487 return NULL;
1488}
1489
1490int hci_blacklist_clear(struct hci_dev *hdev)
1491{
1492 struct list_head *p, *n;
1493
1494 list_for_each_safe(p, n, &hdev->blacklist) {
1495 struct bdaddr_list *b;
1496
1497 b = list_entry(p, struct bdaddr_list, list);
1498
1499 list_del(p);
1500 kfree(b);
1501 }
1502
1503 return 0;
1504}
1505
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001506int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001507{
1508 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001509
1510 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1511 return -EBADF;
1512
Antti Julku5e762442011-08-25 16:48:02 +03001513 if (hci_blacklist_lookup(hdev, bdaddr))
1514 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001515
1516 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001517 if (!entry)
1518 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001519
1520 bacpy(&entry->bdaddr, bdaddr);
1521
1522 list_add(&entry->list, &hdev->blacklist);
1523
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001524 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001525}
1526
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001527int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001528{
1529 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001530
Szymon Janc1ec918c2011-11-16 09:32:21 +01001531 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001532 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001533
1534 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001535 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001536 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001537
1538 list_del(&entry->list);
1539 kfree(entry);
1540
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001541 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001542}
1543
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001544static void hci_clear_adv_cache(struct work_struct *work)
Andre Guedes35815082011-05-26 16:23:53 -03001545{
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001546 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001547 adv_work.work);
Andre Guedes35815082011-05-26 16:23:53 -03001548
1549 hci_dev_lock(hdev);
1550
1551 hci_adv_entries_clear(hdev);
1552
1553 hci_dev_unlock(hdev);
1554}
1555
Andre Guedes76c86862011-05-26 16:23:50 -03001556int hci_adv_entries_clear(struct hci_dev *hdev)
1557{
1558 struct adv_entry *entry, *tmp;
1559
1560 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1561 list_del(&entry->list);
1562 kfree(entry);
1563 }
1564
1565 BT_DBG("%s adv cache cleared", hdev->name);
1566
1567 return 0;
1568}
1569
1570struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1571{
1572 struct adv_entry *entry;
1573
1574 list_for_each_entry(entry, &hdev->adv_entries, list)
1575 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1576 return entry;
1577
1578 return NULL;
1579}
1580
1581static inline int is_connectable_adv(u8 evt_type)
1582{
1583 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1584 return 1;
1585
1586 return 0;
1587}
1588
1589int hci_add_adv_entry(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001590 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
Andre Guedes76c86862011-05-26 16:23:50 -03001591 return -EINVAL;
1592
1593 /* Only new entries should be added to adv_entries. So, if
1594 * bdaddr was found, don't add it. */
1595 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1596 return 0;
1597
Andre Guedes4777bfd2012-01-30 23:31:28 -03001598 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Andre Guedes76c86862011-05-26 16:23:50 -03001599 if (!entry)
1600 return -ENOMEM;
1601
1602 bacpy(&entry->bdaddr, &ev->bdaddr);
1603 entry->bdaddr_type = ev->bdaddr_type;
1604
1605 list_add(&entry->list, &hdev->adv_entries);
1606
1607 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1608 batostr(&entry->bdaddr), entry->bdaddr_type);
1609
1610 return 0;
1611}
1612
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001613static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1614{
1615 struct le_scan_params *param = (struct le_scan_params *) opt;
1616 struct hci_cp_le_set_scan_param cp;
1617
1618 memset(&cp, 0, sizeof(cp));
1619 cp.type = param->type;
1620 cp.interval = cpu_to_le16(param->interval);
1621 cp.window = cpu_to_le16(param->window);
1622
1623 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1624}
1625
/* __hci_request callback: send LE Set Scan Enable with enable=1.
 * The struct is zeroed first, so the remaining fields (e.g. the
 * duplicate filter) are sent as zero; @opt is unused.
 */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1635
/* Synchronously start an LE scan: set the scan parameters, enable
 * scanning, then arm the delayed work that will disable it again
 * after @timeout milliseconds.
 *
 * Runs under hci_req_lock; each HCI step waits up to 3 seconds for
 * the controller.
 *
 * Returns 0 on success, -EINPROGRESS if a scan is already running,
 * or the error from the failed request.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
				u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	/* param lives on the stack; it is only dereferenced inside the
	 * synchronous __hci_request below, so that is safe */
	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scan is running; schedule its automatic shutdown */
	schedule_delayed_work(&hdev->le_scan_disable,
				msecs_to_jiffies(timeout));

	return 0;
}
1669
/* Delayed work that stops a running LE scan: an all-zero LE Set Scan
 * Enable command (enable=0) is sent fire-and-forget.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1682
Andre Guedes28b75a82012-02-03 17:48:00 -03001683static void le_scan_work(struct work_struct *work)
1684{
1685 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1686 struct le_scan_params *param = &hdev->le_scan_params;
1687
1688 BT_DBG("%s", hdev->name);
1689
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001690 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1691 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001692}
1693
1694int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001695 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001696{
1697 struct le_scan_params *param = &hdev->le_scan_params;
1698
1699 BT_DBG("%s", hdev->name);
1700
1701 if (work_busy(&hdev->le_scan))
1702 return -EINPROGRESS;
1703
1704 param->type = type;
1705 param->interval = interval;
1706 param->window = window;
1707 param->timeout = timeout;
1708
1709 queue_work(system_long_wq, &hdev->le_scan);
1710
1711 return 0;
1712}
1713
/* Register HCI device.
 *
 * Allocates the lowest free device id (AMP controllers start at 1 so
 * that id 0 can double as the AMP controller ID), initializes every
 * queue/list/work item of @hdev, creates its workqueue and sysfs
 * entries, and schedules the initial power-on.
 *
 * Returns the assigned id on success or a negative errno; on failure
 * the device is removed from the global list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must at least supply open/close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id (list is kept sorted by id;
	 * head ends up pointing at the insertion position) */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default runtime state and link policy */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	/* Sniff-mode defaults (in baseband slots) */
	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Per-device single-threaded workqueue for rx/tx/cmd work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Powered on now, auto-off later unless mgmt claims the device */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1845
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global
 * list, shuts it down, notifies mgmt (unless still in setup), tears
 * down sysfs/rfkill/workqueue and drops all cached state.  The final
 * hci_dev_put() releases the reference taken at registration.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any half-assembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt for fully set-up devices */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Flush persistent per-device caches under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1898
/* Suspend HCI device.
 *
 * Only notifies interested listeners (e.g. HCI sockets); quiescing
 * the hardware itself is the driver's responsibility.  Always
 * succeeds.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1906
/* Resume HCI device.
 *
 * Counterpart of hci_suspend_dev(): broadcasts the resume event to
 * listeners; the driver restores the hardware state.  Always
 * succeeds.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1914
Marcel Holtmann76bca882009-11-18 00:40:39 +01001915/* Receive frame from HCI drivers */
1916int hci_recv_frame(struct sk_buff *skb)
1917{
1918 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1919 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1920 && !test_bit(HCI_INIT, &hdev->flags))) {
1921 kfree_skb(skb);
1922 return -ENXIO;
1923 }
1924
1925 /* Incomming skb */
1926 bt_cb(skb)->incoming = 1;
1927
1928 /* Time stamp */
1929 __net_timestamp(skb);
1930
Marcel Holtmann76bca882009-11-18 00:40:39 +01001931 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001932 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001933
Marcel Holtmann76bca882009-11-18 00:40:39 +01001934 return 0;
1935}
1936EXPORT_SYMBOL(hci_recv_frame);
1937
/* Core packet-reassembly state machine shared by hci_recv_fragment()
 * and hci_recv_stream_fragment().
 *
 * Appends up to @count bytes from @data to the partial packet kept in
 * hdev->reassembly[index], allocating a worst-case-sized skb for the
 * given @type when none is pending.  Once a full header has arrived,
 * the expected payload length is read out of it; a complete packet is
 * handed to hci_recv_frame() (which consumes it).
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a
 * negative errno: -EILSEQ for a bad type/index, -ENOMEM when
 * allocation fails or the advertised payload would overflow the skb.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate one sized for the
		 * largest frame of this type */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;	/* first collect the header */
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* When exactly the header has arrived, learn how much
		 * payload to expect next */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2046
Marcel Holtmannef222012007-07-11 06:42:04 +02002047int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2048{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302049 int rem = 0;
2050
Marcel Holtmannef222012007-07-11 06:42:04 +02002051 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2052 return -EILSEQ;
2053
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002054 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002055 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302056 if (rem < 0)
2057 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002058
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302059 data += (count - rem);
2060 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002061 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002062
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302063 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002064}
2065EXPORT_SYMBOL(hci_recv_fragment);
2066
Suraj Sumangala99811512010-07-14 13:02:19 +05302067#define STREAM_REASSEMBLY 0
2068
2069int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2070{
2071 int type;
2072 int rem = 0;
2073
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002074 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302075 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2076
2077 if (!skb) {
2078 struct { char type; } *pkt;
2079
2080 /* Start of the frame */
2081 pkt = data;
2082 type = pkt->type;
2083
2084 data++;
2085 count--;
2086 } else
2087 type = bt_cb(skb)->pkt_type;
2088
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002089 rem = hci_reassembly(hdev, type, data, count,
2090 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302091 if (rem < 0)
2092 return rem;
2093
2094 data += (count - rem);
2095 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002096 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302097
2098 return rem;
2099}
2100EXPORT_SYMBOL(hci_recv_stream_fragment);
2101
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102/* ---- Interface to upper protocols ---- */
2103
/* Register a protocol callback block (e.g. L2CAP, SCO) so it receives
 * connection-level events.  Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2115
/* Remove a previously registered protocol callback block.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2127
2128static int hci_send_frame(struct sk_buff *skb)
2129{
2130 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2131
2132 if (!hdev) {
2133 kfree_skb(skb);
2134 return -ENODEV;
2135 }
2136
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002137 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002139 /* Time stamp */
2140 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002142 /* Send copy to monitor */
2143 hci_send_to_monitor(hdev, skb);
2144
2145 if (atomic_read(&hdev->promisc)) {
2146 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002147 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148 }
2149
2150 /* Get rid of skb owner, prior to sending to the driver. */
2151 skb_orphan(skb);
2152
2153 return hdev->send(skb);
2154}
2155
/* Send HCI command.
 *
 * Builds a command packet for @opcode with @plen parameter bytes
 * copied from @param and queues it on hdev->cmd_q; the actual
 * transmission happens asynchronously in hci_cmd_work.
 *
 * Returns 0 on success or -ENOMEM if the skb allocation fails.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During init, remember the last issued command so the event
	 * handler can drive the init sequence forward */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191
2192/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002193void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194{
2195 struct hci_command_hdr *hdr;
2196
2197 if (!hdev->sent_cmd)
2198 return NULL;
2199
2200 hdr = (void *) hdev->sent_cmd->data;
2201
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002202 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002203 return NULL;
2204
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002205 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206
2207 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2208}
2209
2210/* Send ACL data */
2211static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2212{
2213 struct hci_acl_hdr *hdr;
2214 int len = skb->len;
2215
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002216 skb_push(skb, HCI_ACL_HDR_SIZE);
2217 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002218 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002219 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2220 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221}
2222
/* Queue an ACL skb - plus any fragments hanging off its frag_list -
 * on @queue.  The head skb keeps the caller-supplied @flags (already
 * stamped into its header); continuation fragments are re-flagged as
 * ACL_CONT and get their headers added here.  The queue lock is taken
 * so the whole chain is inserted atomically and cannot interleave
 * with other senders.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; fragments are queued individually */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2263
2264void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2265{
2266 struct hci_conn *conn = chan->conn;
2267 struct hci_dev *hdev = conn->hdev;
2268
2269 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2270
2271 skb->dev = (void *) hdev;
2272 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2273 hci_add_acl_hdr(skb, conn->handle, flags);
2274
2275 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002277 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002278}
2279EXPORT_SYMBOL(hci_send_acl);
2280
2281/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002282void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002283{
2284 struct hci_dev *hdev = conn->hdev;
2285 struct hci_sco_hdr hdr;
2286
2287 BT_DBG("%s len %d", hdev->name, skb->len);
2288
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002289 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002290 hdr.dlen = skb->len;
2291
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002292 skb_push(skb, HCI_SCO_HDR_SIZE);
2293 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002294 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002295
2296 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002297 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002298
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002300 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301}
2302EXPORT_SYMBOL(hci_send_sco);
2303
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler: among connections of @type that have
 * queued data and are in a usable state, pick the one with the fewest
 * packets in flight (fairness) and compute its quota (*quote) as its
 * even share of the controller's free buffer slots for that link
 * type.  Returns the chosen connection or NULL (then *quote is 0).
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffer slots for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Even share, but always grant at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2365
/* TX watchdog: the controller stopped acknowledging packets for too
 * long, so forcibly disconnect every @type connection that still has
 * unacknowledged packets in flight (reason 0x13, "Remote User
 * Terminated Connection") to reclaim its buffer credits.
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2386
/* Channel scheduler for @type links: among all channels with queued
 * data, consider only those whose head skb carries the highest
 * priority seen so far, and of those pick the one on the connection
 * with the fewest packets in flight.  *quote gets that connection's
 * even share of the controller's free buffer slots.
 * Returns the chosen channel or NULL if nothing is ready.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the highest-priority class competes */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New top priority: restart the
				 * fairness bookkeeping at this level */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffer slots for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Even share among contenders, but at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2465
/* After a scheduling round over @type links, age channel priorities:
 * channels that transmitted get their per-round sent counter reset,
 * while the head skb of every starved (untouched) channel is promoted
 * to HCI_PRIO_MAX - 1 so low-priority traffic cannot be starved
 * forever by hci_chan_sent().
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				/* Channel was serviced this round */
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Starved channel: bump its head skb */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();
}
2515
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002516static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2517{
2518 /* Calculate count of blocks used by this packet */
2519 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2520}
2521
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002522static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002524 if (!test_bit(HCI_RAW, &hdev->flags)) {
2525 /* ACL tx timeout must be longer than maximum
2526 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002527 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenkocc48dc02012-01-04 16:42:26 +02002528 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002529 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002531}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532
/* ACL scheduler for packet-based flow control: repeatedly pick the most
 * eligible channel (hci_chan_sent) and drain up to 'quote' skbs from it,
 * one controller credit (hdev->acl_cnt) per frame.  A burst stops early
 * when a lower-priority skb reaches the head of the channel queue.
 * If any frame went out, re-run priority promotion so idle channels are
 * not starved next round.
 */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;	/* credits before this run */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Flag the link as stuck if credits are exhausted for too long */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek confirmed eligibility; now actually take it */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;		/* per-round counter, reset by recalculate */
			chan->conn->sent++;	/* outstanding frames on this link */
		}
	}

	/* Credits changed => something was sent; promote starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2570
/* ACL scheduler for block-based flow control: like hci_sched_acl_pkt()
 * but credits are buffer blocks (hdev->block_cnt) and one skb may cost
 * several blocks (__get_blocks).  Bails out entirely when the next frame
 * would not fit in the remaining blocks.
 */
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;	/* blocks before this run */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Flag the link as stuck if blocks are exhausted for too long */
	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Not enough blocks left for this frame: stop the
			 * whole run (note: skips the recalculate below) */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->block_cnt -= blocks;
			quote -= blocks;	/* quote is spent in blocks here */

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Blocks changed => something was sent; promote starved channels */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2616
2617static inline void hci_sched_acl(struct hci_dev *hdev)
2618{
2619 BT_DBG("%s", hdev->name);
2620
2621 if (!hci_conn_num(hdev, ACL_LINK))
2622 return;
2623
2624 switch (hdev->flow_ctl_mode) {
2625 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2626 hci_sched_acl_pkt(hdev);
2627 break;
2628
2629 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2630 hci_sched_acl_blk(hdev);
2631 break;
2632 }
2633}
2634
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635/* Schedule SCO */
2636static inline void hci_sched_sco(struct hci_dev *hdev)
2637{
2638 struct hci_conn *conn;
2639 struct sk_buff *skb;
2640 int quote;
2641
2642 BT_DBG("%s", hdev->name);
2643
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002644 if (!hci_conn_num(hdev, SCO_LINK))
2645 return;
2646
Linus Torvalds1da177e2005-04-16 15:20:36 -07002647 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2648 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2649 BT_DBG("skb %p len %d", skb, skb->len);
2650 hci_send_frame(skb);
2651
2652 conn->sent++;
2653 if (conn->sent == ~0)
2654 conn->sent = 0;
2655 }
2656 }
2657}
2658
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002659static inline void hci_sched_esco(struct hci_dev *hdev)
2660{
2661 struct hci_conn *conn;
2662 struct sk_buff *skb;
2663 int quote;
2664
2665 BT_DBG("%s", hdev->name);
2666
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002667 if (!hci_conn_num(hdev, ESCO_LINK))
2668 return;
2669
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002670 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2671 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2672 BT_DBG("skb %p len %d", skb, skb->len);
2673 hci_send_frame(skb);
2674
2675 conn->sent++;
2676 if (conn->sent == ~0)
2677 conn->sent = 0;
2678 }
2679 }
2680}
2681
/* LE data scheduler.  Controllers without dedicated LE buffers
 * (le_pkts == 0) share the ACL credit pool, so the chosen counter is
 * written back to le_cnt or acl_cnt accordingly at the end.  Same
 * priority-burst structure as hci_sched_acl_pkt().
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;	/* tmp snapshots cnt to detect progress */

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE credits if the controller has them, else ACL's */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent; promote starved LE channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2732
/* TX worker: runs each link-type scheduler in turn (ACL, SCO, eSCO, LE)
 * and finally flushes the raw queue unconditionally — raw packets are not
 * subject to any flow control here.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2755
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002756/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757
2758/* ACL data packet */
2759static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2760{
2761 struct hci_acl_hdr *hdr = (void *) skb->data;
2762 struct hci_conn *conn;
2763 __u16 handle, flags;
2764
2765 skb_pull(skb, HCI_ACL_HDR_SIZE);
2766
2767 handle = __le16_to_cpu(hdr->handle);
2768 flags = hci_flags(handle);
2769 handle = hci_handle(handle);
2770
2771 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2772
2773 hdev->stat.acl_rx++;
2774
2775 hci_dev_lock(hdev);
2776 conn = hci_conn_hash_lookup_handle(hdev, handle);
2777 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002778
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002780 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002781
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002783 l2cap_recv_acldata(conn, skb, flags);
2784 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002786 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002787 hdev->name, handle);
2788 }
2789
2790 kfree_skb(skb);
2791}
2792
2793/* SCO data packet */
2794static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2795{
2796 struct hci_sco_hdr *hdr = (void *) skb->data;
2797 struct hci_conn *conn;
2798 __u16 handle;
2799
2800 skb_pull(skb, HCI_SCO_HDR_SIZE);
2801
2802 handle = __le16_to_cpu(hdr->handle);
2803
2804 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2805
2806 hdev->stat.sco_rx++;
2807
2808 hci_dev_lock(hdev);
2809 conn = hci_conn_hash_lookup_handle(hdev, handle);
2810 hci_dev_unlock(hdev);
2811
2812 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002814 sco_recv_scodata(conn, skb);
2815 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002817 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 hdev->name, handle);
2819 }
2820
2821 kfree_skb(skb);
2822}
2823
/* RX worker: demultiplex every queued inbound packet.  Order matters:
 * the monitor always gets a copy, promiscuous sockets get one next, then
 * raw-mode devices swallow everything; during HCI_INIT data packets are
 * discarded (only events are expected), and finally frames are routed to
 * the event/ACL/SCO handlers by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: the kernel stack does not process anything */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop */
			kfree_skb(skb);
			break;
		}
	}
}
2878
/* Command worker: send the next queued HCI command when the controller
 * has a free command slot (cmd_cnt).  A clone of the command is kept in
 * hdev->sent_cmd — presumably so the completion handler can match the
 * reply against it; confirm in hci_event.c.  If cloning fails the
 * command is put back at the head of the queue and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* During reset no timeout is armed; otherwise start
			 * the command-response watchdog */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue at the head and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002909
2910int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2911{
2912 /* General inquiry access code (GIAC) */
2913 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2914 struct hci_cp_inquiry cp;
2915
2916 BT_DBG("%s", hdev->name);
2917
2918 if (test_bit(HCI_INQUIRY, &hdev->flags))
2919 return -EINPROGRESS;
2920
Johan Hedberg46632622012-01-02 16:06:08 +02002921 inquiry_cache_flush(hdev);
2922
Andre Guedes2519a1f2011-11-07 11:45:24 -03002923 memset(&cp, 0, sizeof(cp));
2924 memcpy(&cp.lap, lap, sizeof(cp.lap));
2925 cp.length = length;
2926
2927 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2928}
Andre Guedes023d5042011-11-04 14:16:52 -03002929
2930int hci_cancel_inquiry(struct hci_dev *hdev)
2931{
2932 BT_DBG("%s", hdev->name);
2933
2934 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2935 return -EPERM;
2936
2937 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2938}