blob: d3ddc0ba9cd4fc3839ab0a489c8ceeb652fe1212 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
Marcel Holtmannb78752c2010-08-08 23:06:53 -040057static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020058static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020059static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Linus Torvalds1da177e2005-04-16 15:20:36 -070061/* HCI device list */
62LIST_HEAD(hci_dev_list);
63DEFINE_RWLOCK(hci_dev_list_lock);
64
65/* HCI callback list */
66LIST_HEAD(hci_cb_list);
67DEFINE_RWLOCK(hci_cb_list_lock);
68
Linus Torvalds1da177e2005-04-16 15:20:36 -070069/* ---- HCI notifications ---- */
70
Marcel Holtmann65164552005-10-28 19:20:48 +020071static void hci_notify(struct hci_dev *hdev, int event)
Linus Torvalds1da177e2005-04-16 15:20:36 -070072{
Marcel Holtmann040030e2012-02-20 14:50:37 +010073 hci_sock_dev_event(hdev, event);
Linus Torvalds1da177e2005-04-16 15:20:36 -070074}
75
76/* ---- HCI requests ---- */
77
Johan Hedberg23bb5762010-12-21 23:01:27 +020078void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070079{
Johan Hedberg23bb5762010-12-21 23:01:27 +020080 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
Johan Hedberga5040ef2011-01-10 13:28:59 +020082 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
84 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020085 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
86 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
87 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
96 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
97 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +0200106 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900127static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100128 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129{
130 DECLARE_WAITQUEUE(wait, current);
131 int err = 0;
132
133 BT_DBG("%s start", hdev->name);
134
135 hdev->req_status = HCI_REQ_PEND;
136
137 add_wait_queue(&hdev->req_wait_q, &wait);
138 set_current_state(TASK_INTERRUPTIBLE);
139
140 req(hdev, opt);
141 schedule_timeout(timeout);
142
143 remove_wait_queue(&hdev->req_wait_q, &wait);
144
145 if (signal_pending(current))
146 return -EINTR;
147
148 switch (hdev->req_status) {
149 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700150 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151 break;
152
153 case HCI_REQ_CANCELED:
154 err = -hdev->req_result;
155 break;
156
157 default:
158 err = -ETIMEDOUT;
159 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700160 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161
Johan Hedberga5040ef2011-01-10 13:28:59 +0200162 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163
164 BT_DBG("%s end: err %d", hdev->name, err);
165
166 return err;
167}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100170 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171{
172 int ret;
173
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
185static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186{
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300190 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192}
193
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200194static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200196 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800197 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200198 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 /* Mandatory initialization */
203
204 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300208 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200213 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200215
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
228 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230
231 /* Optional initialization */
232
233 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200234 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700238 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200240
241 bacpy(&cp.bdaddr, BDADDR_ANY);
242 cp.delete_all = 1;
243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244}
245
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200246static void amp_init(struct hci_dev *hdev)
247{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200250 /* Reset */
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255}
256
257static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
258{
259 struct sk_buff *skb;
260
261 BT_DBG("%s %ld", hdev->name, opt);
262
263 /* Driver initialization */
264
265 /* Special commands */
266 while ((skb = skb_dequeue(&hdev->driver_init))) {
267 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
268 skb->dev = (void *) hdev;
269
270 skb_queue_tail(&hdev->cmd_q, skb);
271 queue_work(hdev->workqueue, &hdev->cmd_work);
272 }
273 skb_queue_purge(&hdev->driver_init);
274
275 switch (hdev->dev_type) {
276 case HCI_BREDR:
277 bredr_init(hdev);
278 break;
279
280 case HCI_AMP:
281 amp_init(hdev);
282 break;
283
284 default:
285 BT_ERR("Unknown device type %d", hdev->dev_type);
286 break;
287 }
288
289}
290
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300291static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
292{
293 BT_DBG("%s", hdev->name);
294
295 /* Read LE buffer size */
296 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
297}
298
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 scan = opt;
302
303 BT_DBG("%s %x", hdev->name, scan);
304
305 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
309static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 auth = opt;
312
313 BT_DBG("%s %x", hdev->name, auth);
314
315 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
319static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 encrypt = opt;
322
323 BT_DBG("%s %x", hdev->name, encrypt);
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327}
328
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200329static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __le16 policy = cpu_to_le16(opt);
332
Marcel Holtmanna418b892008-11-30 12:17:28 +0100333 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200334
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337}
338
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900339/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 * Device is held on return. */
341struct hci_dev *hci_dev_get(int index)
342{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200343 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344
345 BT_DBG("%d", index);
346
347 if (index < 0)
348 return NULL;
349
350 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200351 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
354 break;
355 }
356 }
357 read_unlock(&hci_dev_list_lock);
358 return hdev;
359}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360
361/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200362
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200363bool hci_discovery_active(struct hci_dev *hdev)
364{
365 struct discovery_state *discov = &hdev->discovery;
366
Andre Guedes6fbe1952012-02-03 17:47:58 -0300367 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300368 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300369 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200370 return true;
371
Andre Guedes6fbe1952012-02-03 17:47:58 -0300372 default:
373 return false;
374 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200375}
376
Johan Hedbergff9ef572012-01-04 14:23:45 +0200377void hci_discovery_set_state(struct hci_dev *hdev, int state)
378{
379 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380
381 if (hdev->discovery.state == state)
382 return;
383
384 switch (state) {
385 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300386 if (hdev->discovery.state != DISCOVERY_STARTING)
387 mgmt_discovering(hdev, 0);
Johan Hedbergf963e8e2012-02-20 23:30:44 +0200388 hdev->discovery.type = 0;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200389 break;
390 case DISCOVERY_STARTING:
391 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300392 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200393 mgmt_discovering(hdev, 1);
394 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200395 case DISCOVERY_RESOLVING:
396 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200397 case DISCOVERY_STOPPING:
398 break;
399 }
400
401 hdev->discovery.state = state;
402}
403
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404static void inquiry_cache_flush(struct hci_dev *hdev)
405{
Johan Hedberg30883512012-01-04 14:16:21 +0200406 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200407 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408
Johan Hedberg561aafb2012-01-04 13:31:59 +0200409 list_for_each_entry_safe(p, n, &cache->all, all) {
410 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200411 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200413
414 INIT_LIST_HEAD(&cache->unknown);
415 INIT_LIST_HEAD(&cache->resolve);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200416 cache->state = DISCOVERY_STOPPED;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417}
418
419struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
420{
Johan Hedberg30883512012-01-04 14:16:21 +0200421 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422 struct inquiry_entry *e;
423
424 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
425
Johan Hedberg561aafb2012-01-04 13:31:59 +0200426 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700427 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200428 return e;
429 }
430
431 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700432}
433
Johan Hedberg561aafb2012-01-04 13:31:59 +0200434struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
435 bdaddr_t *bdaddr)
436{
Johan Hedberg30883512012-01-04 14:16:21 +0200437 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200438 struct inquiry_entry *e;
439
440 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
441
442 list_for_each_entry(e, &cache->unknown, list) {
443 if (!bacmp(&e->data.bdaddr, bdaddr))
444 return e;
445 }
446
447 return NULL;
448}
449
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200450struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
451 bdaddr_t *bdaddr,
452 int state)
453{
454 struct discovery_state *cache = &hdev->discovery;
455 struct inquiry_entry *e;
456
457 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
458
459 list_for_each_entry(e, &cache->resolve, list) {
460 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
461 return e;
462 if (!bacmp(&e->data.bdaddr, bdaddr))
463 return e;
464 }
465
466 return NULL;
467}
468
Johan Hedberga3d4e202012-01-09 00:53:02 +0200469void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
470 struct inquiry_entry *ie)
471{
472 struct discovery_state *cache = &hdev->discovery;
473 struct list_head *pos = &cache->resolve;
474 struct inquiry_entry *p;
475
476 list_del(&ie->list);
477
478 list_for_each_entry(p, &cache->resolve, list) {
479 if (p->name_state != NAME_PENDING &&
480 abs(p->data.rssi) >= abs(ie->data.rssi))
481 break;
482 pos = &p->list;
483 }
484
485 list_add(&ie->list, pos);
486}
487
Johan Hedberg31754052012-01-04 13:39:52 +0200488bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200489 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490{
Johan Hedberg30883512012-01-04 14:16:21 +0200491 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200492 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700493
494 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
495
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200496 if (ssp)
497 *ssp = data->ssp_mode;
498
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200499 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200500 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200501 if (ie->data.ssp_mode && ssp)
502 *ssp = true;
503
Johan Hedberga3d4e202012-01-09 00:53:02 +0200504 if (ie->name_state == NAME_NEEDED &&
505 data->rssi != ie->data.rssi) {
506 ie->data.rssi = data->rssi;
507 hci_inquiry_cache_update_resolve(hdev, ie);
508 }
509
Johan Hedberg561aafb2012-01-04 13:31:59 +0200510 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200511 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200512
Johan Hedberg561aafb2012-01-04 13:31:59 +0200513 /* Entry not in the cache. Add new one. */
514 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
515 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200516 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200517
518 list_add(&ie->all, &cache->all);
519
520 if (name_known) {
521 ie->name_state = NAME_KNOWN;
522 } else {
523 ie->name_state = NAME_NOT_KNOWN;
524 list_add(&ie->list, &cache->unknown);
525 }
526
527update:
528 if (name_known && ie->name_state != NAME_KNOWN &&
529 ie->name_state != NAME_PENDING) {
530 ie->name_state = NAME_KNOWN;
531 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700532 }
533
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200534 memcpy(&ie->data, data, sizeof(*data));
535 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700536 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200537
538 if (ie->name_state == NAME_NOT_KNOWN)
539 return false;
540
541 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700542}
543
544static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
545{
Johan Hedberg30883512012-01-04 14:16:21 +0200546 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700547 struct inquiry_info *info = (struct inquiry_info *) buf;
548 struct inquiry_entry *e;
549 int copied = 0;
550
Johan Hedberg561aafb2012-01-04 13:31:59 +0200551 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700552 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200553
554 if (copied >= num)
555 break;
556
Linus Torvalds1da177e2005-04-16 15:20:36 -0700557 bacpy(&info->bdaddr, &data->bdaddr);
558 info->pscan_rep_mode = data->pscan_rep_mode;
559 info->pscan_period_mode = data->pscan_period_mode;
560 info->pscan_mode = data->pscan_mode;
561 memcpy(info->dev_class, data->dev_class, 3);
562 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200563
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200565 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700566 }
567
568 BT_DBG("cache %p, copied %d", cache, copied);
569 return copied;
570}
571
572static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
573{
574 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
575 struct hci_cp_inquiry cp;
576
577 BT_DBG("%s", hdev->name);
578
579 if (test_bit(HCI_INQUIRY, &hdev->flags))
580 return;
581
582 /* Start Inquiry */
583 memcpy(&cp.lap, &ir->lap, 3);
584 cp.length = ir->length;
585 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200586 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587}
588
589int hci_inquiry(void __user *arg)
590{
591 __u8 __user *ptr = arg;
592 struct hci_inquiry_req ir;
593 struct hci_dev *hdev;
594 int err = 0, do_inquiry = 0, max_rsp;
595 long timeo;
596 __u8 *buf;
597
598 if (copy_from_user(&ir, ptr, sizeof(ir)))
599 return -EFAULT;
600
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200601 hdev = hci_dev_get(ir.dev_id);
602 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700603 return -ENODEV;
604
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300605 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900606 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200607 inquiry_cache_empty(hdev) ||
608 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609 inquiry_cache_flush(hdev);
610 do_inquiry = 1;
611 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300612 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613
Marcel Holtmann04837f62006-07-03 10:02:33 +0200614 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200615
616 if (do_inquiry) {
617 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
618 if (err < 0)
619 goto done;
620 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700621
622 /* for unlimited number of responses we will use buffer with 255 entries */
623 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
624
625 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
626 * copy it to the user space.
627 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100628 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200629 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700630 err = -ENOMEM;
631 goto done;
632 }
633
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300634 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700635 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300636 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637
638 BT_DBG("num_rsp %d", ir.num_rsp);
639
640 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
641 ptr += sizeof(ir);
642 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
643 ir.num_rsp))
644 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900645 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700646 err = -EFAULT;
647
648 kfree(buf);
649
650done:
651 hci_dev_put(hdev);
652 return err;
653}
654
655/* ---- HCI ioctl helpers ---- */
656
657int hci_dev_open(__u16 dev)
658{
659 struct hci_dev *hdev;
660 int ret = 0;
661
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200662 hdev = hci_dev_get(dev);
663 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700664 return -ENODEV;
665
666 BT_DBG("%s %p", hdev->name, hdev);
667
668 hci_req_lock(hdev);
669
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200670 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
671 ret = -ERFKILL;
672 goto done;
673 }
674
Linus Torvalds1da177e2005-04-16 15:20:36 -0700675 if (test_bit(HCI_UP, &hdev->flags)) {
676 ret = -EALREADY;
677 goto done;
678 }
679
680 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
681 set_bit(HCI_RAW, &hdev->flags);
682
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200683 /* Treat all non BR/EDR controllers as raw devices if
684 enable_hs is not set */
685 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100686 set_bit(HCI_RAW, &hdev->flags);
687
Linus Torvalds1da177e2005-04-16 15:20:36 -0700688 if (hdev->open(hdev)) {
689 ret = -EIO;
690 goto done;
691 }
692
693 if (!test_bit(HCI_RAW, &hdev->flags)) {
694 atomic_set(&hdev->cmd_cnt, 1);
695 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200696 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700697
Marcel Holtmann04837f62006-07-03 10:02:33 +0200698 ret = __hci_request(hdev, hci_init_req, 0,
699 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700
Andre Guedeseead27d2011-06-30 19:20:55 -0300701 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300702 ret = __hci_request(hdev, hci_le_init_req, 0,
703 msecs_to_jiffies(HCI_INIT_TIMEOUT));
704
Linus Torvalds1da177e2005-04-16 15:20:36 -0700705 clear_bit(HCI_INIT, &hdev->flags);
706 }
707
708 if (!ret) {
709 hci_dev_hold(hdev);
710 set_bit(HCI_UP, &hdev->flags);
711 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200712 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300713 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200714 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300715 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200716 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900717 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700718 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200719 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200720 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400721 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722
723 skb_queue_purge(&hdev->cmd_q);
724 skb_queue_purge(&hdev->rx_q);
725
726 if (hdev->flush)
727 hdev->flush(hdev);
728
729 if (hdev->sent_cmd) {
730 kfree_skb(hdev->sent_cmd);
731 hdev->sent_cmd = NULL;
732 }
733
734 hdev->close(hdev);
735 hdev->flags = 0;
736 }
737
738done:
739 hci_req_unlock(hdev);
740 hci_dev_put(hdev);
741 return ret;
742}
743
744static int hci_dev_do_close(struct hci_dev *hdev)
745{
746 BT_DBG("%s %p", hdev->name, hdev);
747
Andre Guedes28b75a82012-02-03 17:48:00 -0300748 cancel_work_sync(&hdev->le_scan);
749
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750 hci_req_cancel(hdev, ENODEV);
751 hci_req_lock(hdev);
752
753 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300754 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700755 hci_req_unlock(hdev);
756 return 0;
757 }
758
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200759 /* Flush RX and TX works */
760 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400761 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200763 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200764 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200765 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +0200766 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200767 }
768
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200769 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200770 cancel_delayed_work(&hdev->service_cache);
771
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300772 cancel_delayed_work_sync(&hdev->le_scan_disable);
773
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300774 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700775 inquiry_cache_flush(hdev);
776 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300777 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778
779 hci_notify(hdev, HCI_DEV_DOWN);
780
781 if (hdev->flush)
782 hdev->flush(hdev);
783
784 /* Reset device */
785 skb_queue_purge(&hdev->cmd_q);
786 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200787 if (!test_bit(HCI_RAW, &hdev->flags) &&
788 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700789 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200790 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200791 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792 clear_bit(HCI_INIT, &hdev->flags);
793 }
794
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200795 /* flush cmd work */
796 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797
798 /* Drop queues */
799 skb_queue_purge(&hdev->rx_q);
800 skb_queue_purge(&hdev->cmd_q);
801 skb_queue_purge(&hdev->raw_q);
802
803 /* Drop last sent command */
804 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300805 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700806 kfree_skb(hdev->sent_cmd);
807 hdev->sent_cmd = NULL;
808 }
809
810 /* After this point our queues are empty
811 * and no tasks are scheduled. */
812 hdev->close(hdev);
813
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100814 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
815 hci_dev_lock(hdev);
816 mgmt_powered(hdev, 0);
817 hci_dev_unlock(hdev);
818 }
Johan Hedberg5add6af2010-12-16 10:00:37 +0200819
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820 /* Clear flags */
821 hdev->flags = 0;
822
Johan Hedberge59fda82012-02-22 18:11:53 +0200823 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +0200824 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +0200825
Linus Torvalds1da177e2005-04-16 15:20:36 -0700826 hci_req_unlock(hdev);
827
828 hci_dev_put(hdev);
829 return 0;
830}
831
/* Bring down the HCI device identified by @dev on behalf of an ioctl
 * caller.
 *
 * Takes a reference on the device, cancels any pending auto power-off
 * work when the HCI_AUTO_OFF flag was set (the user asked for the
 * close explicitly, so the delayed power-off is redundant), and then
 * performs the real shutdown in hci_dev_do_close().
 *
 * Returns 0 on success, -ENODEV when no such device is registered, or
 * the error propagated from hci_dev_do_close().
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Explicit close supersedes a scheduled auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
849
/* Reset the HCI device identified by @dev.
 *
 * Under the request lock (so it cannot race with open/close/other
 * requests): drops the pending RX and command queues, flushes the
 * inquiry cache and connection hash under hdev->lock, invokes the
 * driver's flush hook and, unless the device is in raw mode, issues an
 * HCI Reset command and waits for its completion.
 *
 * Returns 0 on success (or when the device was not up), -ENODEV for an
 * unknown device, or the error from the reset request.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the single command credit and zero all packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
888
889int hci_dev_reset_stat(__u16 dev)
890{
891 struct hci_dev *hdev;
892 int ret = 0;
893
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200894 hdev = hci_dev_get(dev);
895 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700896 return -ENODEV;
897
898 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
899
900 hci_dev_put(hdev);
901
902 return ret;
903}
904
/* Handle the HCISET* device-control ioctls.
 *
 * Copies a struct hci_dev_req from user space, resolves the target
 * device, and applies the requested setting.  Authentication,
 * encryption, scan-enable and link-policy changes are executed as HCI
 * requests with a timeout; the remaining cases are plain host-side
 * assignments.
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -ENODEV for an
 * unknown device, -EOPNOTSUPP when encryption is not supported,
 * -EINVAL for an unknown command, or the error from the HCI request.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt is read as two packed 16-bit halves:
		 * half [1] is the MTU, half [0] the packet count */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed layout as HCISETACLMTU */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
979
/* Handle the HCIGETDEVLIST ioctl: report (dev_id, flags) pairs for up
 * to the caller-supplied number of registered controllers.
 *
 * Returns 0 on success, -EFAULT on a bad user buffer, -EINVAL when the
 * requested count is zero or implausibly large, -ENOMEM when the
 * temporary kernel buffer cannot be allocated.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the request so the allocation below stays small */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace touching the device cancels auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Non-mgmt (legacy) userspace gets pairable by default */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1026
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot
 * for the device named in the request and copy it back to user space.
 *
 * Returns 0 on success, -EFAULT on a bad user buffer, -ENODEV for an
 * unknown device.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touching the device cancels pending auto power-off;
	 * _sync is safe here since this is process context */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Non-mgmt (legacy) userspace gets pairable by default */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack transport bus in the low nibble, device type above it */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1068
1069/* ---- Interface to HCI drivers ---- */
1070
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001071static int hci_rfkill_set_block(void *data, bool blocked)
1072{
1073 struct hci_dev *hdev = data;
1074
1075 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1076
1077 if (!blocked)
1078 return 0;
1079
1080 hci_dev_do_close(hdev);
1081
1082 return 0;
1083}
1084
/* rfkill integration: only the block/unblock transition is handled */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1088
/* Alloc HCI device
 *
 * Allocates a zeroed struct hci_dev, sets up its sysfs representation
 * and initializes the driver-init skb queue.  Returns NULL on
 * allocation failure; the caller owns the result and releases it with
 * hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1104
/* Free HCI device
 *
 * Purges the driver-init queue and drops the embedded device
 * reference; the actual memory is freed by the device release
 * callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1114
/* Deferred power-on work.
 *
 * Opens the device; on success, arms the delayed auto power-off when
 * HCI_AUTO_OFF is set, and announces the new controller to the
 * management interface once initial setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Power back off automatically unless userspace claims the
	 * device before the timeout */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on finishes setup: tell mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1131
/* Deferred power-off work: simply closes the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1141
/* Delayed work that ends a time-limited discoverable period: turns
 * inquiry scan back off (page scan only) and clears the stored
 * discoverable timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1159
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001160int hci_uuids_clear(struct hci_dev *hdev)
1161{
1162 struct list_head *p, *n;
1163
1164 list_for_each_safe(p, n, &hdev->uuids) {
1165 struct bt_uuid *uuid;
1166
1167 uuid = list_entry(p, struct bt_uuid, list);
1168
1169 list_del(p);
1170 kfree(uuid);
1171 }
1172
1173 return 0;
1174}
1175
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001176int hci_link_keys_clear(struct hci_dev *hdev)
1177{
1178 struct list_head *p, *n;
1179
1180 list_for_each_safe(p, n, &hdev->link_keys) {
1181 struct link_key *key;
1182
1183 key = list_entry(p, struct link_key, list);
1184
1185 list_del(p);
1186 kfree(key);
1187 }
1188
1189 return 0;
1190}
1191
/* Remove and free every SMP long term key stored on @hdev.
 * Always returns 0.
 */
int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1203
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001204struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1205{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001206 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001207
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001208 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001209 if (bacmp(bdaddr, &k->bdaddr) == 0)
1210 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001211
1212 return NULL;
1213}
1214
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001215static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1216 u8 key_type, u8 old_key_type)
1217{
1218 /* Legacy key */
1219 if (key_type < 0x03)
1220 return 1;
1221
1222 /* Debug keys are insecure so don't store them persistently */
1223 if (key_type == HCI_LK_DEBUG_COMBINATION)
1224 return 0;
1225
1226 /* Changed combination key and there's no previous one */
1227 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1228 return 0;
1229
1230 /* Security mode 3 case */
1231 if (!conn)
1232 return 1;
1233
1234 /* Neither local nor remote side had no-bonding as requirement */
1235 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1236 return 1;
1237
1238 /* Local side had dedicated bonding as requirement */
1239 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1240 return 1;
1241
1242 /* Remote side had dedicated bonding as requirement */
1243 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1244 return 1;
1245
1246 /* If none of the above criteria match, then don't store the key
1247 * persistently */
1248 return 0;
1249}
1250
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001251struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001252{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001253 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001254
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001255 list_for_each_entry(k, &hdev->long_term_keys, list) {
1256 if (k->ediv != ediv ||
1257 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001258 continue;
1259
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001260 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001261 }
1262
1263 return NULL;
1264}
1265EXPORT_SYMBOL(hci_find_ltk);
1266
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001267struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1268 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001269{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001270 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001271
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001272 list_for_each_entry(k, &hdev->long_term_keys, list)
1273 if (addr_type == k->bdaddr_type &&
1274 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001275 return k;
1276
1277 return NULL;
1278}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001279EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001280
/* Store (or update) the BR/EDR link key for @bdaddr.
 *
 * Reuses an existing entry when one is found, otherwise allocates a
 * new one (GFP_ATOMIC since this runs from event processing).  When
 * @new_key is set the key is reported to mgmt, and keys judged
 * non-persistent by hci_persistent_key() are removed again right
 * after the notification.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type on record */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are only reported, not stored */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1335
/* Store (or update) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK type bits are accepted; other types are silently
 * ignored (returns 0).  Reuses an existing entry for the same address
 * when present, otherwise allocates one with GFP_ATOMIC.  New long
 * term keys (but not short term keys) are reported to mgmt.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1372
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001373int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1374{
1375 struct link_key *key;
1376
1377 key = hci_find_link_key(hdev, bdaddr);
1378 if (!key)
1379 return -ENOENT;
1380
1381 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1382
1383 list_del(&key->list);
1384 kfree(key);
1385
1386 return 0;
1387}
1388
/* Delete every stored long term key for @bdaddr (all address types).
 * Always returns 0, even when nothing matched.
 */
int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}
1405
/* HCI command timer function
 *
 * Fires when a sent command got no completion in time: restores the
 * single outstanding-command credit and kicks the command work so the
 * queue does not stall forever.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1415
Szymon Janc2763eda2011-03-22 13:12:22 +01001416struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1417 bdaddr_t *bdaddr)
1418{
1419 struct oob_data *data;
1420
1421 list_for_each_entry(data, &hdev->remote_oob_data, list)
1422 if (bacmp(bdaddr, &data->bdaddr) == 0)
1423 return data;
1424
1425 return NULL;
1426}
1427
1428int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1429{
1430 struct oob_data *data;
1431
1432 data = hci_find_remote_oob_data(hdev, bdaddr);
1433 if (!data)
1434 return -ENOENT;
1435
1436 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1437
1438 list_del(&data->list);
1439 kfree(data);
1440
1441 return 0;
1442}
1443
/* Remove and free all stored remote OOB data entries on @hdev.
 * Always returns 0.
 */
int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}
1455
/* Store (or update) remote out-of-band pairing data for @bdaddr.
 *
 * Reuses an existing entry when present, otherwise allocates one with
 * GFP_ATOMIC; hash and randomizer are always overwritten.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* kmalloc is fine: both payload fields are set below */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1479
Antti Julkub2a66aa2011-06-15 12:01:14 +03001480struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1481 bdaddr_t *bdaddr)
1482{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001483 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001484
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001485 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001486 if (bacmp(bdaddr, &b->bdaddr) == 0)
1487 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001488
1489 return NULL;
1490}
1491
1492int hci_blacklist_clear(struct hci_dev *hdev)
1493{
1494 struct list_head *p, *n;
1495
1496 list_for_each_safe(p, n, &hdev->blacklist) {
1497 struct bdaddr_list *b;
1498
1499 b = list_entry(p, struct bdaddr_list, list);
1500
1501 list_del(p);
1502 kfree(b);
1503 }
1504
1505 return 0;
1506}
1507
/* Add @bdaddr to the device blacklist and notify mgmt.
 *
 * Returns -EBADF for the wildcard address, -EEXIST when already
 * blacklisted, -ENOMEM on allocation failure, otherwise the result of
 * the mgmt notification.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1528
/* Remove @bdaddr from the blacklist and notify mgmt.
 *
 * The wildcard address (BDADDR_ANY) clears the whole list instead.
 * Returns -ENOENT when the address was not blacklisted, otherwise the
 * result of the mgmt notification (or of the clear).
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1545
/* Delayed work wrapper that empties the LE advertising cache under
 * hdev->lock.
 */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1557
/* Remove and free every cached LE advertising entry on @hdev.
 * Always returns 0.
 */
int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}
1571
1572struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1573{
1574 struct adv_entry *entry;
1575
1576 list_for_each_entry(entry, &hdev->adv_entries, list)
1577 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1578 return entry;
1579
1580 return NULL;
1581}
1582
1583static inline int is_connectable_adv(u8 evt_type)
1584{
1585 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1586 return 1;
1587
1588 return 0;
1589}
1590
/* Cache the sender of a connectable LE advertising report.
 *
 * Non-connectable event types are rejected with -EINVAL; an address
 * already in the cache is left untouched (returns 0).
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1618
/* HCI request callback: send LE Set Scan Parameters using the
 * le_scan_params smuggled through @opt.
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1631
/* HCI request callback: send LE Set Scan Enable with scanning turned
 * on (remaining fields zeroed).
 */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1641
/* Synchronously start an LE scan: program the scan parameters, enable
 * scanning, and arm the delayed work that stops it after @timeout ms.
 *
 * Returns -EINPROGRESS when a scan is already running, or the error
 * from the underlying HCI requests.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
						u16 window, int timeout)
{
	/* Per-request completion timeout, not the scan duration */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* param lives on the stack; both requests complete before return */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scan is running: schedule its termination */
	schedule_delayed_work(&hdev->le_scan_disable,
				msecs_to_jiffies(timeout));

	return 0;
}
1675
/* Delayed work that stops a running LE scan by sending Set Scan Enable
 * with all-zero parameters (enable = 0).
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1688
/* Worker that runs the scan queued by hci_le_scan(), using the
 * parameters it stashed in hdev->le_scan_params.
 */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval,
			param->window, param->timeout);
}
1699
/* Asynchronously start an LE scan.
 *
 * Stores the parameters in hdev->le_scan_params and queues le_scan_work
 * on the system long-running workqueue; the scan itself (and its
 * eventual disable) happens there.
 *
 * Returns 0 when queued, -EINPROGRESS when the scan work is already
 * pending or running.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
			int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1719
/* Register HCI device.
 *
 * Allocates the first free device id (AMP controllers start at 1 so the
 * id can double as the AMP controller ID), initializes all per-device
 * state, creates the per-device workqueue and sysfs entries, and
 * schedules the initial power-on.
 *
 * Returns the assigned device id on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must at least provide open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id; the list is kept sorted by id,
	 * so walk it until a gap appears. "head" tracks the insertion
	 * point. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default controller settings */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	/* Sniff-mode power-save defaults (slots) */
	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* RX/command/TX processing runs from the per-device workqueue */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	/* One reassembly slot per packet type plus the stream slot */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue: HCI processing must not
	 * be reordered and may be needed for memory reclaim (e.g. when
	 * an hci device backs a network filesystem). */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is non-fatal: the device simply
	 * has no kill switch. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the device on; HCI_AUTO_OFF makes mgmt turn it back off
	 * if userspace does not claim it, HCI_SETUP marks initial setup
	 * as still in progress. */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1851
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * shuts it down, tears down mgmt/sysfs/rfkill state, flushes pending
 * work and drops the registration reference. The teardown order matters:
 * the device is closed before any per-device state is freed.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Make the device invisible to new lookups first */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if the device ever finished
	 * setup; during HCI_INIT/HCI_SETUP mgmt never saw the index. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Wait for a possibly in-flight advertising-cache flush */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free remaining per-device caches under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1904
1905/* Suspend HCI device */
1906int hci_suspend_dev(struct hci_dev *hdev)
1907{
1908 hci_notify(hdev, HCI_DEV_SUSPEND);
1909 return 0;
1910}
1911EXPORT_SYMBOL(hci_suspend_dev);
1912
1913/* Resume HCI device */
1914int hci_resume_dev(struct hci_dev *hdev)
1915{
1916 hci_notify(hdev, HCI_DEV_RESUME);
1917 return 0;
1918}
1919EXPORT_SYMBOL(hci_resume_dev);
1920
Marcel Holtmann76bca882009-11-18 00:40:39 +01001921/* Receive frame from HCI drivers */
1922int hci_recv_frame(struct sk_buff *skb)
1923{
1924 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1925 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1926 && !test_bit(HCI_INIT, &hdev->flags))) {
1927 kfree_skb(skb);
1928 return -ENXIO;
1929 }
1930
1931 /* Incomming skb */
1932 bt_cb(skb)->incoming = 1;
1933
1934 /* Time stamp */
1935 __net_timestamp(skb);
1936
Marcel Holtmann76bca882009-11-18 00:40:39 +01001937 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001938 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001939
Marcel Holtmann76bca882009-11-18 00:40:39 +01001940 return 0;
1941}
1942EXPORT_SYMBOL(hci_recv_frame);
1943
/* Reassemble a (possibly partial) HCI packet from driver data.
 *
 * @hdev:  device the data arrived on
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  raw bytes from the driver
 * @count: number of bytes available at @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * Accumulates bytes into the per-slot skb until a complete frame is
 * available, then feeds it to hci_recv_frame(). Returns the number of
 * input bytes NOT consumed (>= 0), or a negative errno on bad type /
 * index (-EILSEQ) or allocation failure (-ENOMEM).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate an skb large enough for
		 * the biggest possible packet of this type and expect the
		 * header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed to
		 * complete the current header or payload. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify it fits in the allocated skb. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2052
Marcel Holtmannef222012007-07-11 06:42:04 +02002053int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2054{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302055 int rem = 0;
2056
Marcel Holtmannef222012007-07-11 06:42:04 +02002057 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2058 return -EILSEQ;
2059
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002060 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002061 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302062 if (rem < 0)
2063 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002064
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302065 data += (count - rem);
2066 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002067 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002068
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302069 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002070}
2071EXPORT_SYMBOL(hci_recv_fragment);
2072
Suraj Sumangala99811512010-07-14 13:02:19 +05302073#define STREAM_REASSEMBLY 0
2074
2075int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2076{
2077 int type;
2078 int rem = 0;
2079
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002080 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302081 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2082
2083 if (!skb) {
2084 struct { char type; } *pkt;
2085
2086 /* Start of the frame */
2087 pkt = data;
2088 type = pkt->type;
2089
2090 data++;
2091 count--;
2092 } else
2093 type = bt_cb(skb)->pkt_type;
2094
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002095 rem = hci_reassembly(hdev, type, data, count,
2096 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302097 if (rem < 0)
2098 return rem;
2099
2100 data += (count - rem);
2101 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002102 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302103
2104 return rem;
2105}
2106EXPORT_SYMBOL(hci_recv_stream_fragment);
2107
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108/* ---- Interface to upper protocols ---- */
2109
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110int hci_register_cb(struct hci_cb *cb)
2111{
2112 BT_DBG("%p name %s", cb, cb->name);
2113
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002114 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002115 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002116 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
2118 return 0;
2119}
2120EXPORT_SYMBOL(hci_register_cb);
2121
2122int hci_unregister_cb(struct hci_cb *cb)
2123{
2124 BT_DBG("%p name %s", cb, cb->name);
2125
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002126 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002128 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129
2130 return 0;
2131}
2132EXPORT_SYMBOL(hci_unregister_cb);
2133
2134static int hci_send_frame(struct sk_buff *skb)
2135{
2136 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2137
2138 if (!hdev) {
2139 kfree_skb(skb);
2140 return -ENODEV;
2141 }
2142
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002143 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002145 /* Time stamp */
2146 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002148 /* Send copy to monitor */
2149 hci_send_to_monitor(hdev, skb);
2150
2151 if (atomic_read(&hdev->promisc)) {
2152 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002153 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154 }
2155
2156 /* Get rid of skb owner, prior to sending to the driver. */
2157 skb_orphan(skb);
2158
2159 return hdev->send(skb);
2160}
2161
2162/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002163int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164{
2165 int len = HCI_COMMAND_HDR_SIZE + plen;
2166 struct hci_command_hdr *hdr;
2167 struct sk_buff *skb;
2168
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002169 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170
2171 skb = bt_skb_alloc(len, GFP_ATOMIC);
2172 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02002173 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 return -ENOMEM;
2175 }
2176
2177 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002178 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002179 hdr->plen = plen;
2180
2181 if (plen)
2182 memcpy(skb_put(skb, plen), param, plen);
2183
2184 BT_DBG("skb len %d", skb->len);
2185
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002186 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002188
Johan Hedberga5040ef2011-01-10 13:28:59 +02002189 if (test_bit(HCI_INIT, &hdev->flags))
2190 hdev->init_last_cmd = opcode;
2191
Linus Torvalds1da177e2005-04-16 15:20:36 -07002192 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002193 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194
2195 return 0;
2196}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197
2198/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002199void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200{
2201 struct hci_command_hdr *hdr;
2202
2203 if (!hdev->sent_cmd)
2204 return NULL;
2205
2206 hdr = (void *) hdev->sent_cmd->data;
2207
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002208 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209 return NULL;
2210
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002211 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
2213 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2214}
2215
2216/* Send ACL data */
2217static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2218{
2219 struct hci_acl_hdr *hdr;
2220 int len = skb->len;
2221
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002222 skb_push(skb, HCI_ACL_HDR_SIZE);
2223 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002224 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002225 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2226 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227}
2228
/* Queue an ACL frame (and any fragments hanging off its frag_list) on
 * the given channel queue. The first skb already carries its ACL
 * header (added by the caller); headers for the fragments are added
 * here with ACL_CONT set. All fragments are queued atomically so the
 * TX scheduler never sees a half-queued frame.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain before queueing the head */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2269
2270void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2271{
2272 struct hci_conn *conn = chan->conn;
2273 struct hci_dev *hdev = conn->hdev;
2274
2275 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2276
2277 skb->dev = (void *) hdev;
2278 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2279 hci_add_acl_hdr(skb, conn->handle, flags);
2280
2281 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002283 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002284}
2285EXPORT_SYMBOL(hci_send_acl);
2286
2287/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002288void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289{
2290 struct hci_dev *hdev = conn->hdev;
2291 struct hci_sco_hdr hdr;
2292
2293 BT_DBG("%s len %d", hdev->name, skb->len);
2294
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002295 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002296 hdr.dlen = skb->len;
2297
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002298 skb_push(skb, HCI_SCO_HDR_SIZE);
2299 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002300 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301
2302 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002303 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002304
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002306 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002307}
2308EXPORT_SYMBOL(hci_send_sco);
2309
2310/* ---- HCI TX task (outgoing data) ---- */
2311
/* HCI Connection scheduler */
/* Pick the connection of the given link type that has queued data and
 * the fewest packets in flight (least-sent first, for fairness), and
 * compute its TX quota by splitting the controller's free buffer count
 * evenly among all eligible connections (minimum 1).
 *
 * Returns the chosen connection, or NULL (with *quote = 0) when no
 * connection of this type has data pending.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip wrong-type connections and those with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Controller buffer budget depends on the link type; LE
		 * falls back to the ACL buffers when it has no own MTU. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2371
Ville Tervobae1f5d92011-02-10 22:38:53 -03002372static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373{
2374 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002375 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376
Ville Tervobae1f5d92011-02-10 22:38:53 -03002377 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002379 rcu_read_lock();
2380
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002382 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002383 if (c->type == type && c->sent) {
2384 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002385 hdev->name, batostr(&c->dst));
2386 hci_acl_disconn(c, 0x13);
2387 }
2388 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002389
2390 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391}
2392
/* Channel-level TX scheduler.
 *
 * Walks every connection of the given link type and every channel on
 * it, and picks the channel whose head-of-queue skb has the highest
 * priority; ties are broken in favour of the connection with the
 * fewest packets in flight. The quota (*quote) is the connection's
 * fair share of the controller buffers, minimum 1.
 *
 * Returns the chosen channel or NULL when nothing is ready to send.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority counts; lower-
			 * priority channels are skipped outright. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, prefer the connection
			 * with the fewest packets in flight. */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Controller buffer budget for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2471
/* Anti-starvation pass for the channel scheduler.
 *
 * For every connection of the given link type, promote the head skb of
 * each channel that did not get to send in the last round (chan->sent
 * == 0) to priority HCI_PRIO_MAX - 1, so low-priority traffic cannot
 * be starved forever by higher-priority channels. Channels that did
 * send simply get their counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel sent recently: reset and leave it alone */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2521
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002522static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2523{
2524 /* Calculate count of blocks used by this packet */
2525 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2526}
2527
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002528static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530 if (!test_bit(HCI_RAW, &hdev->flags)) {
2531 /* ACL tx timeout must be longer than maximum
2532 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002533 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenkocc48dc02012-01-04 16:42:26 +02002534 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002535 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002536 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002537}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002538
/* Send queued ACL data using packet-based flow control.
 *
 * hci_chan_sent() picks the next channel and a packet quota; a channel
 * stops early when a frame of lower priority than the round's starting
 * priority reaches the head of its queue.  Each transmitted frame costs
 * one credit from hdev->acl_cnt. */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2576
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002577static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2578{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002579 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002580 struct hci_chan *chan;
2581 struct sk_buff *skb;
2582 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002583
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002584 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002585
2586 while (hdev->block_cnt > 0 &&
2587 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2588 u32 priority = (skb_peek(&chan->data_q))->priority;
2589 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2590 int blocks;
2591
2592 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2593 skb->len, skb->priority);
2594
2595 /* Stop if priority has changed */
2596 if (skb->priority < priority)
2597 break;
2598
2599 skb = skb_dequeue(&chan->data_q);
2600
2601 blocks = __get_blocks(hdev, skb);
2602 if (blocks > hdev->block_cnt)
2603 return;
2604
2605 hci_conn_enter_active_mode(chan->conn,
2606 bt_cb(skb)->force_active);
2607
2608 hci_send_frame(skb);
2609 hdev->acl_last_tx = jiffies;
2610
2611 hdev->block_cnt -= blocks;
2612 quote -= blocks;
2613
2614 chan->sent += blocks;
2615 chan->conn->sent += blocks;
2616 }
2617 }
2618
2619 if (cnt != hdev->block_cnt)
2620 hci_prio_recalculate(hdev, ACL_LINK);
2621}
2622
2623static inline void hci_sched_acl(struct hci_dev *hdev)
2624{
2625 BT_DBG("%s", hdev->name);
2626
2627 if (!hci_conn_num(hdev, ACL_LINK))
2628 return;
2629
2630 switch (hdev->flow_ctl_mode) {
2631 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2632 hci_sched_acl_pkt(hdev);
2633 break;
2634
2635 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2636 hci_sched_acl_blk(hdev);
2637 break;
2638 }
2639}
2640
Linus Torvalds1da177e2005-04-16 15:20:36 -07002641/* Schedule SCO */
2642static inline void hci_sched_sco(struct hci_dev *hdev)
2643{
2644 struct hci_conn *conn;
2645 struct sk_buff *skb;
2646 int quote;
2647
2648 BT_DBG("%s", hdev->name);
2649
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002650 if (!hci_conn_num(hdev, SCO_LINK))
2651 return;
2652
Linus Torvalds1da177e2005-04-16 15:20:36 -07002653 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2654 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2655 BT_DBG("skb %p len %d", skb, skb->len);
2656 hci_send_frame(skb);
2657
2658 conn->sent++;
2659 if (conn->sent == ~0)
2660 conn->sent = 0;
2661 }
2662 }
2663}
2664
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002665static inline void hci_sched_esco(struct hci_dev *hdev)
2666{
2667 struct hci_conn *conn;
2668 struct sk_buff *skb;
2669 int quote;
2670
2671 BT_DBG("%s", hdev->name);
2672
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002673 if (!hci_conn_num(hdev, ESCO_LINK))
2674 return;
2675
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002676 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2677 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2678 BT_DBG("skb %p len %d", skb, skb->len);
2679 hci_send_frame(skb);
2680
2681 conn->sent++;
2682 if (conn->sent == ~0)
2683 conn->sent = 0;
2684 }
2685 }
2686}
2687
/* Send queued LE data.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL credits, so the budget is taken from and
 * written back to whichever pool applies. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the budget: dedicated LE credits, or the shared ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused budget to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was transmitted: rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2738
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002739static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002741 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 struct sk_buff *skb;
2743
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002744 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2745 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746
2747 /* Schedule queues and send stuff to HCI driver */
2748
2749 hci_sched_acl(hdev);
2750
2751 hci_sched_sco(hdev);
2752
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002753 hci_sched_esco(hdev);
2754
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002755 hci_sched_le(hdev);
2756
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757 /* Send next queued raw (unknown type) packet */
2758 while ((skb = skb_dequeue(&hdev->raw_q)))
2759 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760}
2761
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002762/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763
2764/* ACL data packet */
2765static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2766{
2767 struct hci_acl_hdr *hdr = (void *) skb->data;
2768 struct hci_conn *conn;
2769 __u16 handle, flags;
2770
2771 skb_pull(skb, HCI_ACL_HDR_SIZE);
2772
2773 handle = __le16_to_cpu(hdr->handle);
2774 flags = hci_flags(handle);
2775 handle = hci_handle(handle);
2776
2777 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2778
2779 hdev->stat.acl_rx++;
2780
2781 hci_dev_lock(hdev);
2782 conn = hci_conn_hash_lookup_handle(hdev, handle);
2783 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002784
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002786 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002787
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002789 l2cap_recv_acldata(conn, skb, flags);
2790 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002791 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002792 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 hdev->name, handle);
2794 }
2795
2796 kfree_skb(skb);
2797}
2798
2799/* SCO data packet */
2800static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2801{
2802 struct hci_sco_hdr *hdr = (void *) skb->data;
2803 struct hci_conn *conn;
2804 __u16 handle;
2805
2806 skb_pull(skb, HCI_SCO_HDR_SIZE);
2807
2808 handle = __le16_to_cpu(hdr->handle);
2809
2810 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2811
2812 hdev->stat.sco_rx++;
2813
2814 hci_dev_lock(hdev);
2815 conn = hci_conn_hash_lookup_handle(hdev, handle);
2816 hci_dev_unlock(hdev);
2817
2818 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002820 sco_recv_scodata(conn, skb);
2821 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002823 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824 hdev->name, handle);
2825 }
2826
2827 kfree_skb(skb);
2828}
2829
/* RX work item: drain hdev->rx_q and dispatch each frame.
 *
 * Every frame is first copied to the monitor channel and, while the
 * device is in promiscuous mode, to raw HCI sockets.  In raw mode all
 * frames stop here; during controller init data packets are discarded
 * and only events are processed. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space owns the device, kernel stack is
		 * bypassed entirely. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2884
/* CMD work item: transmit the next queued HCI command when the
 * controller has command credit (cmd_cnt > 0).
 *
 * A clone of the command is kept in hdev->sent_cmd so the event handler
 * can match the completion; the command timer catches controllers that
 * never answer.  If cloning fails the command is requeued and the work
 * rescheduled. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the previously sent command, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout is enforced during reset */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002915
2916int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2917{
2918 /* General inquiry access code (GIAC) */
2919 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2920 struct hci_cp_inquiry cp;
2921
2922 BT_DBG("%s", hdev->name);
2923
2924 if (test_bit(HCI_INQUIRY, &hdev->flags))
2925 return -EINPROGRESS;
2926
Johan Hedberg46632622012-01-02 16:06:08 +02002927 inquiry_cache_flush(hdev);
2928
Andre Guedes2519a1f2011-11-07 11:45:24 -03002929 memset(&cp, 0, sizeof(cp));
2930 memcpy(&cp.lap, lap, sizeof(cp.lap));
2931 cp.length = length;
2932
2933 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2934}
Andre Guedes023d50492011-11-04 14:16:52 -03002935
2936int hci_cancel_inquiry(struct hci_dev *hdev)
2937{
2938 BT_DBG("%s", hdev->name);
2939
2940 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2941 return -EPERM;
2942
2943 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2944}