blob: 2054c1321c87e46d475739ab937369e54af39a1c [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
S.Çağlar Onur82453022008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
48#include <asm/system.h>
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020049#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <asm/unaligned.h>
51
52#include <net/bluetooth/bluetooth.h>
53#include <net/bluetooth/hci_core.h>
54
Johan Hedbergab81cbf2010-12-15 13:53:18 +020055#define AUTO_OFF_TIMEOUT 2000
56
/* Work handlers for the RX, command and TX paths; defined later in
 * this file and scheduled on hdev->workqueue.
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list, protected by hci_dev_list_lock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list, protected by hci_cb_list_lock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
68
Linus Torvalds1da177e2005-04-16 15:20:36 -070069/* ---- HCI notifications ---- */
70
/* Propagate a device event (HCI_DEV_UP/DOWN/...) to the HCI socket
 * layer so listening user-space monitors see it.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
75
76/* ---- HCI requests ---- */
77
Johan Hedberg23bb5762010-12-21 23:01:27 +020078void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070079{
Johan Hedberg23bb5762010-12-21 23:01:27 +020080 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
Johan Hedberga5040ef2011-01-10 13:28:59 +020082 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
84 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020085 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
86 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
87 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
96 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
97 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +0200106 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which queues one or more HCI commands) and sleeps
 * interruptibly up to @timeout jiffies until hci_req_complete() or
 * hci_req_cancel() signals the result. Caller must hold the req lock.
 *
 * Returns 0 on success, a negative errno mapped from the HCI status,
 * -EINTR if interrupted by a signal, or -ETIMEDOUT.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Must be on the wait queue and TASK_INTERRUPTIBLE *before*
	 * issuing the request, so a fast completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on -EINTR req_status is left as HCI_REQ_PEND;
	 * a later completion may still match it — confirm intended.
	 */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Woken by timeout, not by a completion */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100170 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171{
172 int ret;
173
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
185static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186{
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300190 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192}
193
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200194static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200196 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800197 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200198 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 /* Mandatory initialization */
203
204 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300208 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200213 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200215
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
228 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230
231 /* Optional initialization */
232
233 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200234 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700238 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200240
241 bacpy(&cp.bdaddr, BDADDR_ANY);
242 cp.delete_all = 1;
243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244}
245
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200246static void amp_init(struct hci_dev *hdev)
247{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249
Andrei Emeltchenkoe61ef492011-12-19 16:31:27 +0200250 /* Reset */
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255}
256
257static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
258{
259 struct sk_buff *skb;
260
261 BT_DBG("%s %ld", hdev->name, opt);
262
263 /* Driver initialization */
264
265 /* Special commands */
266 while ((skb = skb_dequeue(&hdev->driver_init))) {
267 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
268 skb->dev = (void *) hdev;
269
270 skb_queue_tail(&hdev->cmd_q, skb);
271 queue_work(hdev->workqueue, &hdev->cmd_work);
272 }
273 skb_queue_purge(&hdev->driver_init);
274
275 switch (hdev->dev_type) {
276 case HCI_BREDR:
277 bredr_init(hdev);
278 break;
279
280 case HCI_AMP:
281 amp_init(hdev);
282 break;
283
284 default:
285 BT_ERR("Unknown device type %d", hdev->dev_type);
286 break;
287 }
288
289}
290
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300291static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
292{
293 BT_DBG("%s", hdev->name);
294
295 /* Read LE buffer size */
296 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
297}
298
Linus Torvalds1da177e2005-04-16 15:20:36 -0700299static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 scan = opt;
302
303 BT_DBG("%s %x", hdev->name, scan);
304
305 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
309static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 auth = opt;
312
313 BT_DBG("%s %x", hdev->name, auth);
314
315 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
319static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __u8 encrypt = opt;
322
323 BT_DBG("%s %x", hdev->name, encrypt);
324
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200325 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200326 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327}
328
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200329static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330{
331 __le16 policy = cpu_to_le16(opt);
332
Marcel Holtmanna418b892008-11-30 12:17:28 +0100333 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200334
335 /* Default link policy */
336 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337}
338
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900339/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700340 * Device is held on return. */
341struct hci_dev *hci_dev_get(int index)
342{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200343 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700344
345 BT_DBG("%d", index);
346
347 if (index < 0)
348 return NULL;
349
350 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200351 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700352 if (d->id == index) {
353 hdev = hci_dev_hold(d);
354 break;
355 }
356 }
357 read_unlock(&hci_dev_list_lock);
358 return hdev;
359}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700360
361/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200362
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200363bool hci_discovery_active(struct hci_dev *hdev)
364{
365 struct discovery_state *discov = &hdev->discovery;
366
Andre Guedes6fbe1952012-02-03 17:47:58 -0300367 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300368 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300369 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200370 return true;
371
Andre Guedes6fbe1952012-02-03 17:47:58 -0300372 default:
373 return false;
374 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200375}
376
Johan Hedbergff9ef572012-01-04 14:23:45 +0200377void hci_discovery_set_state(struct hci_dev *hdev, int state)
378{
379 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380
381 if (hdev->discovery.state == state)
382 return;
383
384 switch (state) {
385 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300386 if (hdev->discovery.state != DISCOVERY_STARTING)
387 mgmt_discovering(hdev, 0);
Johan Hedbergf963e8e2012-02-20 23:30:44 +0200388 hdev->discovery.type = 0;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200389 break;
390 case DISCOVERY_STARTING:
391 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300392 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200393 mgmt_discovering(hdev, 1);
394 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200395 case DISCOVERY_RESOLVING:
396 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200397 case DISCOVERY_STOPPING:
398 break;
399 }
400
401 hdev->discovery.state = state;
402}
403
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404static void inquiry_cache_flush(struct hci_dev *hdev)
405{
Johan Hedberg30883512012-01-04 14:16:21 +0200406 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200407 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408
Johan Hedberg561aafb2012-01-04 13:31:59 +0200409 list_for_each_entry_safe(p, n, &cache->all, all) {
410 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200411 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200413
414 INIT_LIST_HEAD(&cache->unknown);
415 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416}
417
418struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
419{
Johan Hedberg30883512012-01-04 14:16:21 +0200420 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421 struct inquiry_entry *e;
422
423 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
Johan Hedberg561aafb2012-01-04 13:31:59 +0200425 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200427 return e;
428 }
429
430 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431}
432
Johan Hedberg561aafb2012-01-04 13:31:59 +0200433struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300434 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200435{
Johan Hedberg30883512012-01-04 14:16:21 +0200436 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440
441 list_for_each_entry(e, &cache->unknown, list) {
442 if (!bacmp(&e->data.bdaddr, bdaddr))
443 return e;
444 }
445
446 return NULL;
447}
448
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200449struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300450 bdaddr_t *bdaddr,
451 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200452{
453 struct discovery_state *cache = &hdev->discovery;
454 struct inquiry_entry *e;
455
456 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
457
458 list_for_each_entry(e, &cache->resolve, list) {
459 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
460 return e;
461 if (!bacmp(&e->data.bdaddr, bdaddr))
462 return e;
463 }
464
465 return NULL;
466}
467
Johan Hedberga3d4e202012-01-09 00:53:02 +0200468void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300469 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200470{
471 struct discovery_state *cache = &hdev->discovery;
472 struct list_head *pos = &cache->resolve;
473 struct inquiry_entry *p;
474
475 list_del(&ie->list);
476
477 list_for_each_entry(p, &cache->resolve, list) {
478 if (p->name_state != NAME_PENDING &&
479 abs(p->data.rssi) >= abs(ie->data.rssi))
480 break;
481 pos = &p->list;
482 }
483
484 list_add(&ie->list, pos);
485}
486
/* Insert or refresh the inquiry cache entry for @data->bdaddr.
 *
 * @name_known: caller already knows the remote name (e.g. EIR carried it)
 * @ssp: out-parameter, set true when the device reports SSP support
 *       (either in this result or in the cached entry); may be NULL
 *
 * Returns true when no further name resolution is needed for this
 * entry, false when the name is still unknown (or allocation failed).
 * Caller must hold the device lock.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
				bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support is sticky: honour what was cached earlier */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed on an entry awaiting resolution: re-sort
		 * the resolve list so stronger signals resolve first.
		 */
		if (ie->name_state == NAME_NEEDED &&
				data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever
	 * auxiliary list (unknown/resolve) it was on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
			ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
542
543static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
544{
Johan Hedberg30883512012-01-04 14:16:21 +0200545 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 struct inquiry_info *info = (struct inquiry_info *) buf;
547 struct inquiry_entry *e;
548 int copied = 0;
549
Johan Hedberg561aafb2012-01-04 13:31:59 +0200550 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200552
553 if (copied >= num)
554 break;
555
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556 bacpy(&info->bdaddr, &data->bdaddr);
557 info->pscan_rep_mode = data->pscan_rep_mode;
558 info->pscan_period_mode = data->pscan_period_mode;
559 info->pscan_mode = data->pscan_mode;
560 memcpy(info->dev_class, data->dev_class, 3);
561 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200562
Linus Torvalds1da177e2005-04-16 15:20:36 -0700563 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200564 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565 }
566
567 BT_DBG("cache %p, copied %d", cache, copied);
568 return copied;
569}
570
571static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
572{
573 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
574 struct hci_cp_inquiry cp;
575
576 BT_DBG("%s", hdev->name);
577
578 if (test_bit(HCI_INQUIRY, &hdev->flags))
579 return;
580
581 /* Start Inquiry */
582 memcpy(&cp.lap, &ir->lap, 3);
583 cp.length = ir->length;
584 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200585 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700586}
587
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry, then copy
 * the cached results back to user space after the request structure.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only re-run the inquiry when the cache is stale or empty, or
	 * the caller explicitly asked for a flush.
	 */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28s; 2000ms over-approximates that */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy the (possibly updated) request back, then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
653
654/* ---- HCI ioctl helpers ---- */
655
/* Bring up HCI device @dev: open the transport, run the HCI init
 * sequence (unless raw), and announce HCI_DEV_UP. On init failure the
 * device is fully torn down again. Returns 0 or a negative errno
 * (-ENODEV, -ERFKILL, -EALREADY, -EIO, or an init error).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered: refuse to power it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Respect a soft rfkill block */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the underlying transport */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		/* Common init sequence (synchronous) */
		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE-specific init, when the host supports LE */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP, mgmt_powered is deferred to the
		 * setup completion path.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
747
/* Power down @hdev: cancel pending work, flush queues, optionally send
 * a final HCI_Reset, close the transport and clear device state.
 * Safe to call on an already-down device (returns 0 immediately).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	/* Abort any synchronous request still waiting for completion */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A discoverable timeout in flight would re-arm state we are
	 * about to clear; cancel it and drop the discoverable flag.
	 */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* When auto-off was pending, mgmt already considers us off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
835
836int hci_dev_close(__u16 dev)
837{
838 struct hci_dev *hdev;
839 int err;
840
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200841 hdev = hci_dev_get(dev);
842 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700843 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100844
845 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
846 cancel_delayed_work(&hdev->power_off);
847
Linus Torvalds1da177e2005-04-16 15:20:36 -0700848 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100849
Linus Torvalds1da177e2005-04-16 15:20:36 -0700850 hci_dev_put(hdev);
851 return err;
852}
853
/* HCIDEVRESET ioctl backend: flush all queued traffic and connections
 * on a running device and, unless it is in raw mode, issue HCI_Reset.
 * Returns 0 on success (including when the device is not up) or a
 * negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other HCI requests on this device. */
	hci_req_lock(hdev);

	/* Nothing to reset on a device that is not up. */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and zero the per-link-type data
	 * credits, matching the controller's post-reset state. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
892
893int hci_dev_reset_stat(__u16 dev)
894{
895 struct hci_dev *hdev;
896 int ret = 0;
897
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200898 hdev = hci_dev_get(dev);
899 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700900 return -ENODEV;
901
902 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
903
904 hci_dev_put(hdev);
905
906 return ret;
907}
908
/* Handle the HCISET* device ioctls.  Each command either triggers a
 * synchronous HCI request (auth, encrypt, scan, link policy) or
 * directly updates a field of the local hci_dev.  Returns 0 or a
 * negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured. */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: in memory order the
		 * first half is the packet count, the second the MTU
		 * (NOTE(review): split depends on host endianness of
		 * the 32-bit dev_opt — legacy ioctl ABI). */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
983
/* HCIGETDEVLIST ioctl backend: copy id and flags of up to dev_num
 * registered HCI devices back to userspace.  Returns 0 on success or
 * -EFAULT/-EINVAL/-ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kernel allocation driven by the userspace count. */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace enumerating devices keeps a pending
		 * auto-power-off from firing underneath it. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) users get pairable by default. */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in. */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1030
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info for one
 * device and copy it to userspace.  Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace querying the device keeps a pending auto-power-off
	 * from firing. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) users get pairable by default. */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: controller type. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1072
1073/* ---- Interface to HCI drivers ---- */
1074
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001075static int hci_rfkill_set_block(void *data, bool blocked)
1076{
1077 struct hci_dev *hdev = data;
1078
1079 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1080
1081 if (!blocked)
1082 return 0;
1083
1084 hci_dev_do_close(hdev);
1085
1086 return 0;
1087}
1088
1089static const struct rfkill_ops hci_rfkill_ops = {
1090 .set_block = hci_rfkill_set_block,
1091};
1092
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093/* Alloc HCI device */
1094struct hci_dev *hci_alloc_dev(void)
1095{
1096 struct hci_dev *hdev;
1097
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001098 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 if (!hdev)
1100 return NULL;
1101
David Herrmann0ac7e702011-10-08 14:58:47 +02001102 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001103 skb_queue_head_init(&hdev->driver_init);
1104
1105 return hdev;
1106}
1107EXPORT_SYMBOL(hci_alloc_dev);
1108
/* Free HCI device: drop any driver-init skbs and release the embedded
 * device reference.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1118
/* Work handler that powers a controller on. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* A device brought up only automatically is scheduled to power
	 * itself back off unless claimed within the timeout. */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-up ends the setup phase: announce the
	 * controller index to the management interface. */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1135
/* Delayed-work handler that powers a controller off (used e.g. by the
 * auto-off timeout armed in hci_power_on()).
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1145
/* Delayed-work handler that ends a time-limited discoverable period:
 * write page-scan-only back to the controller and clear the timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1163
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001164int hci_uuids_clear(struct hci_dev *hdev)
1165{
1166 struct list_head *p, *n;
1167
1168 list_for_each_safe(p, n, &hdev->uuids) {
1169 struct bt_uuid *uuid;
1170
1171 uuid = list_entry(p, struct bt_uuid, list);
1172
1173 list_del(p);
1174 kfree(uuid);
1175 }
1176
1177 return 0;
1178}
1179
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001180int hci_link_keys_clear(struct hci_dev *hdev)
1181{
1182 struct list_head *p, *n;
1183
1184 list_for_each_safe(p, n, &hdev->link_keys) {
1185 struct link_key *key;
1186
1187 key = list_entry(p, struct link_key, list);
1188
1189 list_del(p);
1190 kfree(key);
1191 }
1192
1193 return 0;
1194}
1195
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001196int hci_smp_ltks_clear(struct hci_dev *hdev)
1197{
1198 struct smp_ltk *k, *tmp;
1199
1200 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1201 list_del(&k->list);
1202 kfree(k);
1203 }
1204
1205 return 0;
1206}
1207
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001208struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1209{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001210 struct link_key *k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001211
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001212 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001213 if (bacmp(bdaddr, &k->bdaddr) == 0)
1214 return k;
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001215
1216 return NULL;
1217}
1218
/* Decide whether a newly created link key should be kept (and reported
 * to userspace) persistently.  Returns 1 for keys that should outlive
 * the current connection, 0 for keys to discard afterwards.
 */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
					u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1254
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001255struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001256{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001257 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001258
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001259 list_for_each_entry(k, &hdev->long_term_keys, list) {
1260 if (k->ediv != ediv ||
1261 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001262 continue;
1263
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001264 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001265 }
1266
1267 return NULL;
1268}
1269EXPORT_SYMBOL(hci_find_ltk);
1270
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001271struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001272 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001273{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001274 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001275
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001276 list_for_each_entry(k, &hdev->long_term_keys, list)
1277 if (addr_type == k->bdaddr_type &&
1278 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001279 return k;
1280
1281 return NULL;
1282}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001283EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001284
/* Store (or update) the link key for @bdaddr.  When @new_key is set
 * the key is also reported to the management interface and removed
 * again from the list if hci_persistent_key() decides it should not be
 * kept.  Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
			bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
				(!conn || conn->remote_auth == 0xff) &&
				old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the original key's type so the
	 * persistence decision below still sees what it replaced. */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are reported once and then dropped. */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1339
/* Store (or update) an SMP short term / long term key for the given
 * address.  Key types other than HCI_SMP_STK/HCI_SMP_LTK are silently
 * ignored (returns 0).  When @new_key is set and the key is an LTK it
 * is reported to the management interface.  Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one. */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys (not STKs) are announced to userspace. */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1376
Johan Hedberg55ed8ca2011-01-17 14:41:05 +02001377int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1378{
1379 struct link_key *key;
1380
1381 key = hci_find_link_key(hdev, bdaddr);
1382 if (!key)
1383 return -ENOENT;
1384
1385 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1386
1387 list_del(&key->list);
1388 kfree(key);
1389
1390 return 0;
1391}
1392
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001393int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1394{
1395 struct smp_ltk *k, *tmp;
1396
1397 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1398 if (bacmp(bdaddr, &k->bdaddr))
1399 continue;
1400
1401 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1402
1403 list_del(&k->list);
1404 kfree(k);
1405 }
1406
1407 return 0;
1408}
1409
Ville Tervo6bd32322011-02-16 16:32:41 +02001410/* HCI command timer function */
1411static void hci_cmd_timer(unsigned long arg)
1412{
1413 struct hci_dev *hdev = (void *) arg;
1414
1415 BT_ERR("%s command tx timeout", hdev->name);
1416 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001417 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001418}
1419
Szymon Janc2763eda2011-03-22 13:12:22 +01001420struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001421 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001422{
1423 struct oob_data *data;
1424
1425 list_for_each_entry(data, &hdev->remote_oob_data, list)
1426 if (bacmp(bdaddr, &data->bdaddr) == 0)
1427 return data;
1428
1429 return NULL;
1430}
1431
1432int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1433{
1434 struct oob_data *data;
1435
1436 data = hci_find_remote_oob_data(hdev, bdaddr);
1437 if (!data)
1438 return -ENOENT;
1439
1440 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1441
1442 list_del(&data->list);
1443 kfree(data);
1444
1445 return 0;
1446}
1447
1448int hci_remote_oob_data_clear(struct hci_dev *hdev)
1449{
1450 struct oob_data *data, *n;
1451
1452 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1453 list_del(&data->list);
1454 kfree(data);
1455 }
1456
1457 return 0;
1458}
1459
1460int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001461 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001462{
1463 struct oob_data *data;
1464
1465 data = hci_find_remote_oob_data(hdev, bdaddr);
1466
1467 if (!data) {
1468 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1469 if (!data)
1470 return -ENOMEM;
1471
1472 bacpy(&data->bdaddr, bdaddr);
1473 list_add(&data->list, &hdev->remote_oob_data);
1474 }
1475
1476 memcpy(data->hash, hash, sizeof(data->hash));
1477 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1478
1479 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1480
1481 return 0;
1482}
1483
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001484struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001485{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001486 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001487
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001488 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001489 if (bacmp(bdaddr, &b->bdaddr) == 0)
1490 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001491
1492 return NULL;
1493}
1494
1495int hci_blacklist_clear(struct hci_dev *hdev)
1496{
1497 struct list_head *p, *n;
1498
1499 list_for_each_safe(p, n, &hdev->blacklist) {
1500 struct bdaddr_list *b;
1501
1502 b = list_entry(p, struct bdaddr_list, list);
1503
1504 list_del(p);
1505 kfree(b);
1506 }
1507
1508 return 0;
1509}
1510
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001511int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001512{
1513 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001514
1515 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1516 return -EBADF;
1517
Antti Julku5e762442011-08-25 16:48:02 +03001518 if (hci_blacklist_lookup(hdev, bdaddr))
1519 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001520
1521 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001522 if (!entry)
1523 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001524
1525 bacpy(&entry->bdaddr, bdaddr);
1526
1527 list_add(&entry->list, &hdev->blacklist);
1528
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001529 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001530}
1531
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001532int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001533{
1534 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001535
Szymon Janc1ec918c2011-11-16 09:32:21 +01001536 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001537 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001538
1539 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001540 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001541 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001542
1543 list_del(&entry->list);
1544 kfree(entry);
1545
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001546 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001547}
1548
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001549static void hci_clear_adv_cache(struct work_struct *work)
Andre Guedes35815082011-05-26 16:23:53 -03001550{
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001551 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001552 adv_work.work);
Andre Guedes35815082011-05-26 16:23:53 -03001553
1554 hci_dev_lock(hdev);
1555
1556 hci_adv_entries_clear(hdev);
1557
1558 hci_dev_unlock(hdev);
1559}
1560
Andre Guedes76c86862011-05-26 16:23:50 -03001561int hci_adv_entries_clear(struct hci_dev *hdev)
1562{
1563 struct adv_entry *entry, *tmp;
1564
1565 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1566 list_del(&entry->list);
1567 kfree(entry);
1568 }
1569
1570 BT_DBG("%s adv cache cleared", hdev->name);
1571
1572 return 0;
1573}
1574
1575struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1576{
1577 struct adv_entry *entry;
1578
1579 list_for_each_entry(entry, &hdev->adv_entries, list)
1580 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1581 return entry;
1582
1583 return NULL;
1584}
1585
1586static inline int is_connectable_adv(u8 evt_type)
1587{
1588 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1589 return 1;
1590
1591 return 0;
1592}
1593
/* Cache the address from a connectable LE advertising report.
 * Returns -EINVAL for non-connectable report types, 0 if the address
 * is already cached or was added, -ENOMEM on allocation failure.
 */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1617
/* HCI request callback: program the controller's LE scan parameters
 * (scan type, interval and window) from the le_scan_params passed via
 * @opt.
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1630
/* HCI request callback: enable LE scanning (cp.enable = 1; the other
 * command fields stay zeroed).
 */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1640
/* Synchronously start an LE scan: set the scan parameters, enable
 * scanning, then arm the delayed work that disables it again after
 * @timeout milliseconds.  May sleep (runs from le_scan_work()).
 *
 * Returns 0 on success, -EINPROGRESS if a scan is already active, or
 * a negative error from the HCI requests.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
				u16 window, int timeout)
{
	/* Per-request timeout for the two HCI commands below. */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scan is running: schedule the automatic disable. */
	schedule_delayed_work(&hdev->le_scan_disable,
				msecs_to_jiffies(timeout));

	return 0;
}
1674
/* Delayed work that ends a timed LE scan.  The command parameter is
 * left all-zero, i.e. cp.enable == 0, which disables scanning.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1687
Andre Guedes28b75a82012-02-03 17:48:00 -03001688static void le_scan_work(struct work_struct *work)
1689{
1690 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1691 struct le_scan_params *param = &hdev->le_scan_params;
1692
1693 BT_DBG("%s", hdev->name);
1694
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001695 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1696 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001697}
1698
1699int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001700 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001701{
1702 struct le_scan_params *param = &hdev->le_scan_params;
1703
1704 BT_DBG("%s", hdev->name);
1705
1706 if (work_busy(&hdev->le_scan))
1707 return -EINPROGRESS;
1708
1709 param->type = type;
1710 param->interval = interval;
1711 param->window = window;
1712 param->timeout = timeout;
1713
1714 queue_work(system_long_wq, &hdev->le_scan);
1715
1716 return 0;
1717}
1718
/* Register HCI device
 *
 * Assigns the lowest free device id (AMP controllers start at 1 so the
 * id can double as the AMP controller ID), initializes all per-device
 * state, creates the per-device workqueue and the sysfs/rfkill entries,
 * and schedules the initial power-on.
 *
 * Returns the assigned id on success or a negative error code.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must at least provide open and close callbacks. */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id; the list is kept sorted by id,
	 * so walk until the first gap and insert there. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Conservative defaults until the controller reports its
	 * actual capabilities during init. */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands the controller never answers. */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Ordered (max_active == 1) per-device workqueue for RX/TX/cmd
	 * processing; WQ_MEM_RECLAIM because the network stack may
	 * depend on it making progress under memory pressure. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failure is not fatal. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1850
/* Unregister HCI device
 *
 * Reverses hci_register_dev(): removes the device from the global list,
 * shuts it down, notifies management if it was fully set up, tears down
 * rfkill/sysfs/workqueue and frees all per-device lists. The reference
 * taken at registration time is dropped at the end.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Signal in-progress unregistration to other code paths. */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* kfree_skb(NULL) is a no-op, so unused slots are fine. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if the device had completed
	 * setup; half-initialized devices were never announced. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all remaining per-device data under the device lock. */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1905
/* Suspend HCI device: only notifies registered listeners of
 * HCI_DEV_SUSPEND; any hardware handling is up to the driver.
 * Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1913
/* Resume HCI device: only notifies registered listeners of
 * HCI_DEV_RESUME. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1921
Marcel Holtmann76bca882009-11-18 00:40:39 +01001922/* Receive frame from HCI drivers */
1923int hci_recv_frame(struct sk_buff *skb)
1924{
1925 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1926 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1927 && !test_bit(HCI_INIT, &hdev->flags))) {
1928 kfree_skb(skb);
1929 return -ENXIO;
1930 }
1931
1932 /* Incomming skb */
1933 bt_cb(skb)->incoming = 1;
1934
1935 /* Time stamp */
1936 __net_timestamp(skb);
1937
Marcel Holtmann76bca882009-11-18 00:40:39 +01001938 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001939 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001940
Marcel Holtmann76bca882009-11-18 00:40:39 +01001941 return 0;
1942}
1943EXPORT_SYMBOL(hci_recv_frame);
1944
/* Incrementally reassemble one HCI packet from driver-provided chunks.
 *
 * @type:  packet type (event, ACL or SCO data)
 * @data:  next chunk of bytes
 * @count: number of bytes in @data
 * @index: slot in hdev->reassembly[] used as reassembly state
 *
 * Once the header is complete, the expected payload length is read from
 * it; once the whole packet is complete it is handed to hci_recv_frame.
 * Returns the number of unconsumed bytes (the start of the next packet
 * in @data), or a negative error code.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer sized for the
		 * largest possible packet of this type and expect the
		 * fixed-size header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy as much as is both available and still expected. */
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If we just completed the header, learn the payload
		 * length from it and sanity-check it against the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2053
Marcel Holtmannef222012007-07-11 06:42:04 +02002054int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2055{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302056 int rem = 0;
2057
Marcel Holtmannef222012007-07-11 06:42:04 +02002058 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2059 return -EILSEQ;
2060
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002061 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002062 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302063 if (rem < 0)
2064 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002065
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302066 data += (count - rem);
2067 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002068 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002069
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302070 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002071}
2072EXPORT_SYMBOL(hci_recv_fragment);
2073
Suraj Sumangala99811512010-07-14 13:02:19 +05302074#define STREAM_REASSEMBLY 0
2075
2076int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2077{
2078 int type;
2079 int rem = 0;
2080
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002081 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302082 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2083
2084 if (!skb) {
2085 struct { char type; } *pkt;
2086
2087 /* Start of the frame */
2088 pkt = data;
2089 type = pkt->type;
2090
2091 data++;
2092 count--;
2093 } else
2094 type = bt_cb(skb)->pkt_type;
2095
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002096 rem = hci_reassembly(hdev, type, data, count,
2097 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302098 if (rem < 0)
2099 return rem;
2100
2101 data += (count - rem);
2102 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002103 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302104
2105 return rem;
2106}
2107EXPORT_SYMBOL(hci_recv_stream_fragment);
2108
Linus Torvalds1da177e2005-04-16 15:20:36 -07002109/* ---- Interface to upper protocols ---- */
2110
/* Register an upper-layer protocol callback set. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Serialize against other writers of the global callback list. */
	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2122
/* Remove a previously registered protocol callback set.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2134
2135static int hci_send_frame(struct sk_buff *skb)
2136{
2137 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2138
2139 if (!hdev) {
2140 kfree_skb(skb);
2141 return -ENODEV;
2142 }
2143
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002144 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002145
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002146 /* Time stamp */
2147 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002149 /* Send copy to monitor */
2150 hci_send_to_monitor(hdev, skb);
2151
2152 if (atomic_read(&hdev->promisc)) {
2153 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002154 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155 }
2156
2157 /* Get rid of skb owner, prior to sending to the driver. */
2158 skb_orphan(skb);
2159
2160 return hdev->send(skb);
2161}
2162
2163/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002164int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165{
2166 int len = HCI_COMMAND_HDR_SIZE + plen;
2167 struct hci_command_hdr *hdr;
2168 struct sk_buff *skb;
2169
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002170 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002171
2172 skb = bt_skb_alloc(len, GFP_ATOMIC);
2173 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02002174 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002175 return -ENOMEM;
2176 }
2177
2178 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002179 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180 hdr->plen = plen;
2181
2182 if (plen)
2183 memcpy(skb_put(skb, plen), param, plen);
2184
2185 BT_DBG("skb len %d", skb->len);
2186
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002187 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002189
Johan Hedberga5040ef2011-01-10 13:28:59 +02002190 if (test_bit(HCI_INIT, &hdev->flags))
2191 hdev->init_last_cmd = opcode;
2192
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002194 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195
2196 return 0;
2197}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198
2199/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002200void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201{
2202 struct hci_command_hdr *hdr;
2203
2204 if (!hdev->sent_cmd)
2205 return NULL;
2206
2207 hdr = (void *) hdev->sent_cmd->data;
2208
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002209 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210 return NULL;
2211
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002212 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213
2214 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2215}
2216
/* Send ACL data */

/* Prepend the 4-byte ACL header (handle/flags + data length) to @skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Connection handle and PB/BC flags share one 16-bit LE field. */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2229
/* Queue an ACL packet — possibly split into a frag_list of fragments —
 * on @queue. The head fragment already carries its ACL header; headers
 * for the remaining fragments are added here with ACL_CONT set, and all
 * fragments are enqueued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as
		 * an individual skb below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2270
/* Send ACL data on @chan: add the ACL header to the first fragment,
 * queue the packet on the channel's data queue and kick the TX work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	/* The head fragment keeps the caller's PB/BC flags. */
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2287
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Prepend the 3-byte SCO header. */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	/* Queue on the connection and let the TX work drain it. */
	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2310
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler
 *
 * Among all connections of @type that have queued data and are in
 * BT_CONNECTED/BT_CONFIG state, pick the one with the fewest
 * outstanding (sent but unacknowledged) packets and compute a fair
 * packet *quote for it from the controller's free buffer count.
 * Returns the chosen connection or NULL (then *quote is 0).
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest packets in flight. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffer count for this link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share, but always grant at least one packet. */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2372
/* Recover from a TX timeout on links of @type: every connection that
 * still has unacknowledged packets is assumed stalled and disconnected
 * with reason 0x13 (remote user terminated connection).
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2393
/* HCI channel scheduler for links of @type.
 *
 * Walks every eligible connection's channel list and selects a channel
 * to service next: only channels whose head skb carries the highest
 * priority seen so far compete, and among those the one whose owning
 * connection has the fewest outstanding packets wins. A fair packet
 * *quote is derived from the controller's free buffer count.
 * Returns the chosen channel or NULL.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the priority of the queue head counts. */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority level found: restart
				 * the fairness bookkeeping. */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type were seen. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffer count for the winning link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share among candidates, minimum one packet. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2472
/* Anti-starvation pass for links of @type: channels that were not
 * serviced in the last scheduling round (chan->sent == 0) get the
 * priority of their head skb promoted to HCI_PRIO_MAX - 1 so they can
 * compete in the next round; serviced channels just have their per-round
 * counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced: reset and skip promotion. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once all connections of this type were seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2522
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002523static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2524{
2525 /* Calculate count of blocks used by this packet */
2526 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2527}
2528
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002529static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531 if (!test_bit(HCI_RAW, &hdev->flags)) {
2532 /* ACL tx timeout must be longer than maximum
2533 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002534 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenkocc48dc02012-01-04 16:42:26 +02002535 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d2011-02-10 22:38:53 -03002536 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002538}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539
/* Packet-based ACL scheduler: hand queued ACL frames to the driver while
 * the controller still has packet credits (hdev->acl_cnt). */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;	/* credits at entry, for the recalc check below */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the channel's head packet; the inner loop
		 * only sends packets of at least this priority. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One packet = one credit; per-channel and
			 * per-connection sent counters feed fairness. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Credits were consumed this round: promote starved channels. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2577
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002578static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2579{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002580 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002581 struct hci_chan *chan;
2582 struct sk_buff *skb;
2583 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002584
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002585 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002586
2587 while (hdev->block_cnt > 0 &&
2588 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2589 u32 priority = (skb_peek(&chan->data_q))->priority;
2590 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2591 int blocks;
2592
2593 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2594 skb->len, skb->priority);
2595
2596 /* Stop if priority has changed */
2597 if (skb->priority < priority)
2598 break;
2599
2600 skb = skb_dequeue(&chan->data_q);
2601
2602 blocks = __get_blocks(hdev, skb);
2603 if (blocks > hdev->block_cnt)
2604 return;
2605
2606 hci_conn_enter_active_mode(chan->conn,
2607 bt_cb(skb)->force_active);
2608
2609 hci_send_frame(skb);
2610 hdev->acl_last_tx = jiffies;
2611
2612 hdev->block_cnt -= blocks;
2613 quote -= blocks;
2614
2615 chan->sent += blocks;
2616 chan->conn->sent += blocks;
2617 }
2618 }
2619
2620 if (cnt != hdev->block_cnt)
2621 hci_prio_recalculate(hdev, ACL_LINK);
2622}
2623
2624static inline void hci_sched_acl(struct hci_dev *hdev)
2625{
2626 BT_DBG("%s", hdev->name);
2627
2628 if (!hci_conn_num(hdev, ACL_LINK))
2629 return;
2630
2631 switch (hdev->flow_ctl_mode) {
2632 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2633 hci_sched_acl_pkt(hdev);
2634 break;
2635
2636 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2637 hci_sched_acl_blk(hdev);
2638 break;
2639 }
2640}
2641
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642/* Schedule SCO */
2643static inline void hci_sched_sco(struct hci_dev *hdev)
2644{
2645 struct hci_conn *conn;
2646 struct sk_buff *skb;
2647 int quote;
2648
2649 BT_DBG("%s", hdev->name);
2650
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002651 if (!hci_conn_num(hdev, SCO_LINK))
2652 return;
2653
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2655 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2656 BT_DBG("skb %p len %d", skb, skb->len);
2657 hci_send_frame(skb);
2658
2659 conn->sent++;
2660 if (conn->sent == ~0)
2661 conn->sent = 0;
2662 }
2663 }
2664}
2665
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002666static inline void hci_sched_esco(struct hci_dev *hdev)
2667{
2668 struct hci_conn *conn;
2669 struct sk_buff *skb;
2670 int quote;
2671
2672 BT_DBG("%s", hdev->name);
2673
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002674 if (!hci_conn_num(hdev, ESCO_LINK))
2675 return;
2676
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002677 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2678 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2679 BT_DBG("skb %p len %d", skb, skb->len);
2680 hci_send_frame(skb);
2681
2682 conn->sent++;
2683 if (conn->sent == ~0)
2684 conn->sent = 0;
2685 }
2686 }
2687}
2688
/* LE scheduler: uses the dedicated LE buffer pool when the controller
 * advertises one (hdev->le_pkts), otherwise borrows ACL credits. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the pool: dedicated LE credits, or the shared ACL pool. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember entry value for the recalc check below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Only send packets at least as urgent as the head one. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Credits were consumed this round: promote starved channels. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2739
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002740static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002742 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002743 struct sk_buff *skb;
2744
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002745 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2746 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747
2748 /* Schedule queues and send stuff to HCI driver */
2749
2750 hci_sched_acl(hdev);
2751
2752 hci_sched_sco(hdev);
2753
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002754 hci_sched_esco(hdev);
2755
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002756 hci_sched_le(hdev);
2757
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 /* Send next queued raw (unknown type) packet */
2759 while ((skb = skb_dequeue(&hdev->raw_q)))
2760 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761}
2762
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002763/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764
2765/* ACL data packet */
2766static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2767{
2768 struct hci_acl_hdr *hdr = (void *) skb->data;
2769 struct hci_conn *conn;
2770 __u16 handle, flags;
2771
2772 skb_pull(skb, HCI_ACL_HDR_SIZE);
2773
2774 handle = __le16_to_cpu(hdr->handle);
2775 flags = hci_flags(handle);
2776 handle = hci_handle(handle);
2777
2778 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2779
2780 hdev->stat.acl_rx++;
2781
2782 hci_dev_lock(hdev);
2783 conn = hci_conn_hash_lookup_handle(hdev, handle);
2784 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002785
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002787 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002788
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002790 l2cap_recv_acldata(conn, skb, flags);
2791 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002793 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002794 hdev->name, handle);
2795 }
2796
2797 kfree_skb(skb);
2798}
2799
2800/* SCO data packet */
2801static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2802{
2803 struct hci_sco_hdr *hdr = (void *) skb->data;
2804 struct hci_conn *conn;
2805 __u16 handle;
2806
2807 skb_pull(skb, HCI_SCO_HDR_SIZE);
2808
2809 handle = __le16_to_cpu(hdr->handle);
2810
2811 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2812
2813 hdev->stat.sco_rx++;
2814
2815 hci_dev_lock(hdev);
2816 conn = hci_conn_hash_lookup_handle(hdev, handle);
2817 hci_dev_unlock(hdev);
2818
2819 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002821 sco_recv_scodata(conn, skb);
2822 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002824 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 hdev->name, handle);
2826 }
2827
2828 kfree_skb(skb);
2829}
2830
/* RX work item: drain hdev->rx_q, mirror each packet to the monitor
 * (and to raw sockets in promiscuous mode), then dispatch by type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode the stack does not process packets at all. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			/* (only events are meaningful during init; the
			 * continue targets the outer while loop) */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2885
/* CMD work item: transmit the next queued HCI command when the
 * controller has a command credit available (hdev->cmd_cnt). */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent command; a clone of the new one
		 * is kept so the completion path can inspect it. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command watchdog while a reset is pending;
			 * otherwise (re)arm the command timeout timer. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue the command and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002916
2917int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2918{
2919 /* General inquiry access code (GIAC) */
2920 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2921 struct hci_cp_inquiry cp;
2922
2923 BT_DBG("%s", hdev->name);
2924
2925 if (test_bit(HCI_INQUIRY, &hdev->flags))
2926 return -EINPROGRESS;
2927
Johan Hedberg46632622012-01-02 16:06:08 +02002928 inquiry_cache_flush(hdev);
2929
Andre Guedes2519a1f2011-11-07 11:45:24 -03002930 memset(&cp, 0, sizeof(cp));
2931 memcpy(&cp.lap, lap, sizeof(cp.lap));
2932 cp.length = length;
2933
2934 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2935}
Andre Guedes023d5042011-11-04 14:16:52 -03002936
2937int hci_cancel_inquiry(struct hci_dev *hdev)
2938{
2939 BT_DBG("%s", hdev->name);
2940
2941 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2942 return -EPERM;
2943
2944 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2945}