/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
Johan Hedbergab81cbf2010-12-15 13:53:18 +020054#define AUTO_OFF_TIMEOUT 2000
55
Marcel Holtmannb78752c2010-08-08 23:06:53 -040056static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020057static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020058static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
Linus Torvalds1da177e2005-04-16 15:20:36 -070060/* HCI device list */
61LIST_HEAD(hci_dev_list);
62DEFINE_RWLOCK(hci_dev_list_lock);
63
64/* HCI callback list */
65LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock);
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Broadcast a device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
Johan Hedberg23bb5762010-12-21 23:01:27 +020077void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070078{
Johan Hedberg23bb5762010-12-21 23:01:27 +020079 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
80
Johan Hedberga5040ef2011-01-10 13:28:59 +020081 /* If this is the init phase check if the completed command matches
82 * the last init command, and if not just return.
83 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020084 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
86 struct sk_buff *skb;
87
88 /* Some CSR based controllers generate a spontaneous
89 * reset complete event during init and any pending
90 * command will never be completed. In such a case we
91 * need to resend whatever was the last sent
92 * command.
93 */
94
95 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
96 return;
97
98 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
99 if (skb) {
100 skb_queue_head(&hdev->cmd_q, skb);
101 queue_work(hdev->workqueue, &hdev->cmd_work);
102 }
103
Johan Hedberg23bb5762010-12-21 23:01:27 +0200104 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +0200105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112}
113
114static void hci_req_cancel(struct hci_dev *hdev, int err)
115{
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123}
124
125/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which issues HCI commands) and sleeps interruptibly for up
 * to @timeout jiffies until hci_req_complete()/hci_req_cancel() settles
 * req_status.  Returns 0 on success, a negative errno on failure,
 * -EINTR if interrupted by a signal, -ETIMEDOUT on timeout.
 * Caller must hold the request lock (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves before issuing the request so a fast
	 * completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status; map it to an errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
167
/* Serialized wrapper around __hci_request(): takes the per-device
 * request lock so only one synchronous request runs at a time.
 * Returns -ENETDOWN when the device is not up.
 */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
183
/* Request callback: issue HCI_Reset.  HCI_RESET is set so the reset
 * completion handling can recognize the in-progress reset.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
192
/* Queue the BR/EDR controller init command sequence (called from
 * hci_init_req during HCI_INIT).  Command order follows the Bluetooth
 * Core Specification recommended initialization.
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that quirk on HCI_Reset) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 slots) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys on the controller */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
244
/* Queue the AMP controller init command sequence: AMP controllers use
 * block-based flow control and only need reset + version readout here.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
255
/* Request callback run during device bring-up: first flush any
 * driver-supplied init commands into the command queue, then run the
 * transport-specific init sequence for the device type.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
289
/* Request callback: LE-specific init — read the LE buffer size. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
297
/* Request callback: write scan enable; @opt carries the scan bitmask
 * (inquiry scan and/or page scan).
 */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
307
/* Request callback: write authentication enable; @opt is the enable flag. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
317
/* Request callback: write encryption mode; @opt is the mode value. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
327
/* Request callback: write the default link policy; @opt carries the
 * policy bits (converted to little endian on the wire).
 */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
337
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900338/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339 * Device is held on return. */
340struct hci_dev *hci_dev_get(int index)
341{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200342 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343
344 BT_DBG("%d", index);
345
346 if (index < 0)
347 return NULL;
348
349 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200350 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 if (d->id == index) {
352 hdev = hci_dev_hold(d);
353 break;
354 }
355 }
356 read_unlock(&hci_dev_list_lock);
357 return hdev;
358}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359
360/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200361
/* Return true while discovery is actively finding devices or resolving
 * remote names; all other discovery states count as inactive.
 */
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}
375
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events.  A transition to the same state is a no-op.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so no "stopped discovering" event is sent.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		hdev->discovery.type = 0;
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
402
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403static void inquiry_cache_flush(struct hci_dev *hdev)
404{
Johan Hedberg30883512012-01-04 14:16:21 +0200405 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200406 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407
Johan Hedberg561aafb2012-01-04 13:31:59 +0200408 list_for_each_entry_safe(p, n, &cache->all, all) {
409 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200410 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200412
413 INIT_LIST_HEAD(&cache->unknown);
414 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415}
416
417struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
418{
Johan Hedberg30883512012-01-04 14:16:21 +0200419 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420 struct inquiry_entry *e;
421
422 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
423
Johan Hedberg561aafb2012-01-04 13:31:59 +0200424 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200426 return e;
427 }
428
429 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700430}
431
Johan Hedberg561aafb2012-01-04 13:31:59 +0200432struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300433 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200434{
Johan Hedberg30883512012-01-04 14:16:21 +0200435 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200436 struct inquiry_entry *e;
437
438 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
439
440 list_for_each_entry(e, &cache->unknown, list) {
441 if (!bacmp(&e->data.bdaddr, bdaddr))
442 return e;
443 }
444
445 return NULL;
446}
447
/* Look up an entry on the resolve sub-list.  With @bdaddr == BDADDR_ANY
 * the first entry whose name_state equals @state is returned; otherwise
 * the entry matching the address.  Returns NULL when none matches.
 * Caller must hold the hdev lock.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
466
/* Re-insert @ie into the resolve list keeping it ordered so that
 * stronger signals (smaller |rssi|) are resolved first; entries already
 * NAME_PENDING stay ahead of the insertion point.
 * Caller must hold the hdev lock.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the first entry with weaker (or equal) signal that is not
	 * currently being resolved; insert before it.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
485
/* Add or refresh an inquiry cache entry for a discovered device.
 *
 * @data:       inquiry result data for the device
 * @name_known: true when the remote name is already known
 * @ssp:        out-parameter, set true when the device supports SSP
 *              (may be NULL)
 *
 * Returns true when the entry's name is known (i.e. no name resolution
 * is needed), false when the name is still unknown or allocation fails.
 * Caller must hold the hdev lock.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* A previously seen SSP capability sticks even if this
		 * result doesn't carry it.
		 */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed for an entry awaiting name resolution:
		 * re-sort the resolve list by signal strength.
		 */
		if (ie->name_state == NAME_NEEDED &&
				data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever sub-list
	 * (unknown/resolve) it was queued on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
			ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
541
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info.  Returns the number of entries copied.
 * Caller must hold the hdev lock and size @buf for @num entries.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
569
/* Request callback: start an inquiry using the parameters in the
 * hci_inquiry_req passed via @opt.  No-op if an inquiry is already
 * in progress.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
586
/* HCIINQUIRY ioctl handler: run an inquiry (if the cache is stale,
 * empty, or a flush was requested) and copy the cached results back to
 * user space after the updated hci_inquiry_req header.
 *
 * @arg: user pointer to a struct hci_inquiry_req followed by space for
 *       the inquiry_info results.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28 s (~2000 ms per unit) */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
652
653/* ---- HCI ioctl helpers ---- */
654
/* Bring up the HCI device with index @dev: open the transport, run the
 * init command sequence (unless the device is raw), and on success mark
 * it HCI_UP and notify mgmt.  On init failure all queues/works are
 * flushed and the transport is closed again.
 * Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered: refuse to bring it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP the "powered" event is deferred to
		 * the setup completion path.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
746
/* Tear the device down: cancel pending work and requests, flush caches
 * and queues, optionally issue a reset (quirk), close the transport and
 * notify mgmt.  Safe to call when the device is already down (returns 0
 * after only stopping the command timer).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
			test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* AUTO_OFF closes already reported "powered off"; only report
	 * for explicit closes.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
834
835int hci_dev_close(__u16 dev)
836{
837 struct hci_dev *hdev;
838 int err;
839
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200840 hdev = hci_dev_get(dev);
841 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700842 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100843
844 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
845 cancel_delayed_work(&hdev->power_off);
846
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100848
Linus Torvalds1da177e2005-04-16 15:20:36 -0700849 hci_dev_put(hdev);
850 return err;
851}
852
/* Reset the HCI device with the given id: drop queued packets, flush
 * the inquiry cache and connection hash, and (unless the device is in
 * raw mode) issue an HCI_Reset to the controller.
 * Returns 0 or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore a fresh command credit and clear packet counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
891
892int hci_dev_reset_stat(__u16 dev)
893{
894 struct hci_dev *hdev;
895 int ret = 0;
896
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200897 hdev = hci_dev_get(dev);
898 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899 return -ENODEV;
900
901 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
902
903 hci_dev_put(hdev);
904
905 return ret;
906}
907
/* Handle the HCISET* device-control ioctls.  @arg points to a
 * struct hci_dev_req in userspace: dr.dev_id selects the device and
 * dr.dev_opt carries the command-specific value.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* MTU and packet count arrive packed as the two 16-bit
		 * halves of dev_opt (memory order) */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
982
/* Handle the HCIGETDEVLIST ioctl: copy (dev_id, flags) pairs for up
 * to the caller-requested number of registered devices back to
 * userspace.  Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Reject zero and anything needing more than two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy ioctl access: cancel a pending auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Without a mgmt-aware userspace, default to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back as many entries as were actually filled */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1029
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * requested device and copy it back to userspace.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy ioctl access: cancel a pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Without a mgmt-aware userspace, default to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Bus in the low nibble, controller type in the next nibble */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1071
1072/* ---- Interface to HCI drivers ---- */
1073
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001074static int hci_rfkill_set_block(void *data, bool blocked)
1075{
1076 struct hci_dev *hdev = data;
1077
1078 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1079
1080 if (!blocked)
1081 return 0;
1082
1083 hci_dev_do_close(hdev);
1084
1085 return 0;
1086}
1087
1088static const struct rfkill_ops hci_rfkill_ops = {
1089 .set_block = hci_rfkill_set_block,
1090};
1091
Linus Torvalds1da177e2005-04-16 15:20:36 -07001092/* Alloc HCI device */
1093struct hci_dev *hci_alloc_dev(void)
1094{
1095 struct hci_dev *hdev;
1096
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001097 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001098 if (!hdev)
1099 return NULL;
1100
David Herrmann0ac7e702011-10-08 14:58:47 +02001101 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001102 skb_queue_head_init(&hdev->driver_init);
1103
1104 return hdev;
1105}
1106EXPORT_SYMBOL(hci_alloc_dev);
1107
1108/* Free HCI device */
1109void hci_free_dev(struct hci_dev *hdev)
1110{
1111 skb_queue_purge(&hdev->driver_init);
1112
Marcel Holtmanna91f2e32006-07-03 10:02:41 +02001113 /* will free via device release */
1114 put_device(&hdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001115}
1116EXPORT_SYMBOL(hci_free_dev);
1117
/* Work item that powers the adapter on.  If the open fails the work
 * simply returns; otherwise a delayed auto power-off is armed (when
 * HCI_AUTO_OFF is set) and mgmt is told about a freshly set-up
 * controller. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Power back off automatically unless something claims the
	 * device before the timeout */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on completes setup */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1134
1135static void hci_power_off(struct work_struct *work)
1136{
Johan Hedberg32435532011-11-07 22:16:04 +02001137 struct hci_dev *hdev = container_of(work, struct hci_dev,
1138 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001139
1140 BT_DBG("%s", hdev->name);
1141
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001142 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001143}
1144
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001145static void hci_discov_off(struct work_struct *work)
1146{
1147 struct hci_dev *hdev;
1148 u8 scan = SCAN_PAGE;
1149
1150 hdev = container_of(work, struct hci_dev, discov_off.work);
1151
1152 BT_DBG("%s", hdev->name);
1153
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001154 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001155
1156 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1157
1158 hdev->discov_timeout = 0;
1159
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001160 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001161}
1162
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001163int hci_uuids_clear(struct hci_dev *hdev)
1164{
1165 struct list_head *p, *n;
1166
1167 list_for_each_safe(p, n, &hdev->uuids) {
1168 struct bt_uuid *uuid;
1169
1170 uuid = list_entry(p, struct bt_uuid, list);
1171
1172 list_del(p);
1173 kfree(uuid);
1174 }
1175
1176 return 0;
1177}
1178
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001179int hci_link_keys_clear(struct hci_dev *hdev)
1180{
1181 struct list_head *p, *n;
1182
1183 list_for_each_safe(p, n, &hdev->link_keys) {
1184 struct link_key *key;
1185
1186 key = list_entry(p, struct link_key, list);
1187
1188 list_del(p);
1189 kfree(key);
1190 }
1191
1192 return 0;
1193}
1194
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001195int hci_smp_ltks_clear(struct hci_dev *hdev)
1196{
1197 struct smp_ltk *k, *tmp;
1198
1199 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1200 list_del(&k->list);
1201 kfree(k);
1202 }
1203
1204 return 0;
1205}
1206
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001207struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1208{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001209 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001210
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001211 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001212 if (bacmp(bdaddr, &k->bdaddr) == 0)
1213 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001214
1215 return NULL;
1216}
1217
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301218static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001219 u8 key_type, u8 old_key_type)
1220{
1221 /* Legacy key */
1222 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301223 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001224
1225 /* Debug keys are insecure so don't store them persistently */
1226 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301227 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001228
1229 /* Changed combination key and there's no previous one */
1230 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301231 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001232
1233 /* Security mode 3 case */
1234 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301235 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001236
1237 /* Neither local nor remote side had no-bonding as requirement */
1238 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301239 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001240
1241 /* Local side had dedicated bonding as requirement */
1242 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301243 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001244
1245 /* Remote side had dedicated bonding as requirement */
1246 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301247 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001248
1249 /* If none of the above criteria match, then don't store the key
1250 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301251 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001252}
1253
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001254struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001255{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001256 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001257
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001258 list_for_each_entry(k, &hdev->long_term_keys, list) {
1259 if (k->ediv != ediv ||
1260 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001261 continue;
1262
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001263 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001264 }
1265
1266 return NULL;
1267}
1268EXPORT_SYMBOL(hci_find_ltk);
1269
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001270struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001271 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001272{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001273 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001274
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001275 list_for_each_entry(k, &hdev->long_term_keys, list)
1276 if (addr_type == k->bdaddr_type &&
1277 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001278 return k;
1279
1280 return NULL;
1281}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001282EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001283
/* Store a new BR/EDR link key (or update an existing one) for @bdaddr.
 * @conn may be NULL (e.g. security mode 3 key notification).
 * @new_key is non-zero when the controller reported a newly created
 * key, in which case userspace is notified via mgmt.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the checks below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous stored type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1337
/* Store (or refresh) an SMP key for @bdaddr/@addr_type.  @type is an
 * HCI_SMP_* key-class value; anything that is neither an STK nor an
 * LTK is silently ignored.  When @new_key is set and the key is an
 * LTK, userspace is notified via mgmt.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
				int new_key, u8 authenticated, u8 tk[16],
				u8 enc_size, u16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys are reported to userspace, not STKs */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1374
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001375int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1376{
1377 struct link_key *key;
1378
1379 key = hci_find_link_key(hdev, bdaddr);
1380 if (!key)
1381 return -ENOENT;
1382
1383 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1384
1385 list_del(&key->list);
1386 kfree(key);
1387
1388 return 0;
1389}
1390
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001391int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1392{
1393 struct smp_ltk *k, *tmp;
1394
1395 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1396 if (bacmp(bdaddr, &k->bdaddr))
1397 continue;
1398
1399 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1400
1401 list_del(&k->list);
1402 kfree(k);
1403 }
1404
1405 return 0;
1406}
1407
/* HCI command timer function: fires when the controller has not
 * answered the outstanding command in time.  Restores the single
 * command credit first, then kicks the cmd work so the queue can
 * make progress again.  Runs in timer (softirq) context. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1417
Szymon Janc2763eda2011-03-22 13:12:22 +01001418struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001419 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001420{
1421 struct oob_data *data;
1422
1423 list_for_each_entry(data, &hdev->remote_oob_data, list)
1424 if (bacmp(bdaddr, &data->bdaddr) == 0)
1425 return data;
1426
1427 return NULL;
1428}
1429
1430int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1431{
1432 struct oob_data *data;
1433
1434 data = hci_find_remote_oob_data(hdev, bdaddr);
1435 if (!data)
1436 return -ENOENT;
1437
1438 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1439
1440 list_del(&data->list);
1441 kfree(data);
1442
1443 return 0;
1444}
1445
1446int hci_remote_oob_data_clear(struct hci_dev *hdev)
1447{
1448 struct oob_data *data, *n;
1449
1450 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1451 list_del(&data->list);
1452 kfree(data);
1453 }
1454
1455 return 0;
1456}
1457
1458int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001459 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001460{
1461 struct oob_data *data;
1462
1463 data = hci_find_remote_oob_data(hdev, bdaddr);
1464
1465 if (!data) {
1466 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1467 if (!data)
1468 return -ENOMEM;
1469
1470 bacpy(&data->bdaddr, bdaddr);
1471 list_add(&data->list, &hdev->remote_oob_data);
1472 }
1473
1474 memcpy(data->hash, hash, sizeof(data->hash));
1475 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1476
1477 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1478
1479 return 0;
1480}
1481
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001482struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001483{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001484 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001485
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001486 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001487 if (bacmp(bdaddr, &b->bdaddr) == 0)
1488 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001489
1490 return NULL;
1491}
1492
1493int hci_blacklist_clear(struct hci_dev *hdev)
1494{
1495 struct list_head *p, *n;
1496
1497 list_for_each_safe(p, n, &hdev->blacklist) {
1498 struct bdaddr_list *b;
1499
1500 b = list_entry(p, struct bdaddr_list, list);
1501
1502 list_del(p);
1503 kfree(b);
1504 }
1505
1506 return 0;
1507}
1508
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001509int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001510{
1511 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001512
1513 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1514 return -EBADF;
1515
Antti Julku5e762442011-08-25 16:48:02 +03001516 if (hci_blacklist_lookup(hdev, bdaddr))
1517 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001518
1519 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001520 if (!entry)
1521 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001522
1523 bacpy(&entry->bdaddr, bdaddr);
1524
1525 list_add(&entry->list, &hdev->blacklist);
1526
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001527 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001528}
1529
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001530int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001531{
1532 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001533
Szymon Janc1ec918c2011-11-16 09:32:21 +01001534 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001535 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001536
1537 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001538 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001539 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001540
1541 list_del(&entry->list);
1542 kfree(entry);
1543
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001544 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001545}
1546
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001547static void hci_clear_adv_cache(struct work_struct *work)
Andre Guedes35815082011-05-26 16:23:53 -03001548{
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001549 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001550 adv_work.work);
Andre Guedes35815082011-05-26 16:23:53 -03001551
1552 hci_dev_lock(hdev);
1553
1554 hci_adv_entries_clear(hdev);
1555
1556 hci_dev_unlock(hdev);
1557}
1558
Andre Guedes76c86862011-05-26 16:23:50 -03001559int hci_adv_entries_clear(struct hci_dev *hdev)
1560{
1561 struct adv_entry *entry, *tmp;
1562
1563 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1564 list_del(&entry->list);
1565 kfree(entry);
1566 }
1567
1568 BT_DBG("%s adv cache cleared", hdev->name);
1569
1570 return 0;
1571}
1572
1573struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1574{
1575 struct adv_entry *entry;
1576
1577 list_for_each_entry(entry, &hdev->adv_entries, list)
1578 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1579 return entry;
1580
1581 return NULL;
1582}
1583
1584static inline int is_connectable_adv(u8 evt_type)
1585{
1586 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1587 return 1;
1588
1589 return 0;
1590}
1591
1592int hci_add_adv_entry(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001593 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
Andre Guedes76c86862011-05-26 16:23:50 -03001594 return -EINVAL;
1595
1596 /* Only new entries should be added to adv_entries. So, if
1597 * bdaddr was found, don't add it. */
1598 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1599 return 0;
1600
Andre Guedes4777bfd2012-01-30 23:31:28 -03001601 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Andre Guedes76c86862011-05-26 16:23:50 -03001602 if (!entry)
1603 return -ENOMEM;
1604
1605 bacpy(&entry->bdaddr, &ev->bdaddr);
1606 entry->bdaddr_type = ev->bdaddr_type;
1607
1608 list_add(&entry->list, &hdev->adv_entries);
1609
1610 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1611 batostr(&entry->bdaddr), entry->bdaddr_type);
1612
1613 return 0;
1614}
1615
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001616static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1617{
1618 struct le_scan_params *param = (struct le_scan_params *) opt;
1619 struct hci_cp_le_set_scan_param cp;
1620
1621 memset(&cp, 0, sizeof(cp));
1622 cp.type = param->type;
1623 cp.interval = cpu_to_le16(param->interval);
1624 cp.window = cpu_to_le16(param->window);
1625
1626 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1627}
1628
1629static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1630{
1631 struct hci_cp_le_set_scan_enable cp;
1632
1633 memset(&cp, 0, sizeof(cp));
1634 cp.enable = 1;
1635
1636 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1637}
1638
/* Synchronously program LE scan parameters and enable scanning, then
 * arm the delayed work that disables the scan after @timeout ms.
 * Returns 0 on success, -EINPROGRESS if scanning is already enabled,
 * or a negative errno from the HCI requests.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
				u16 window, int timeout)
{
	/* 3 s budget for each of the two synchronous requests below */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* First set the parameters, then enable scanning */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scan runs until the disable work fires */
	schedule_delayed_work(&hdev->le_scan_disable,
				msecs_to_jiffies(timeout));

	return 0;
}
1672
/* Delayed work scheduled by hci_do_le_scan(): turn LE scanning back off.
 * The parameter block is all zeroes, so 'enable' is 0 (disable).
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
						le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1685
Andre Guedes28b75a82012-02-03 17:48:00 -03001686static void le_scan_work(struct work_struct *work)
1687{
1688 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1689 struct le_scan_params *param = &hdev->le_scan_params;
1690
1691 BT_DBG("%s", hdev->name);
1692
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001693 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1694 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001695}
1696
1697int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001698 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001699{
1700 struct le_scan_params *param = &hdev->le_scan_params;
1701
1702 BT_DBG("%s", hdev->name);
1703
1704 if (work_busy(&hdev->le_scan))
1705 return -EINPROGRESS;
1706
1707 param->type = type;
1708 param->interval = interval;
1709 param->window = window;
1710 param->timeout = timeout;
1711
1712 queue_work(system_long_wq, &hdev->le_scan);
1713
1714 return 0;
1715}
1716
/* Register HCI device */
/* Allocates the lowest free device id, initializes all per-device state,
 * creates the per-device workqueue and sysfs/rfkill entries, and kicks
 * off the power-on work. Returns the assigned id on success or a
 * negative errno on failure.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must at least provide open and close callbacks. */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	/* hci_dev_list is kept sorted by id; walk it until a gap is found,
	 * remembering the node to insert after. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default controller state and packet/link policy. */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Watchdog for commands the controller never answers. */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue for this device's rx/tx/cmd
	 * work; WQ_MEM_RECLAIM because the network stack may depend on it. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is non-fatal; just drop the handle. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done under the lock above. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1848
/* Unregister HCI device */
/* Tears down everything hci_register_dev() set up: removes the device
 * from the global list, closes it, flushes pending work, clears all
 * stored state and drops the registration reference.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so concurrent users can bail out. */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if the device ever finished setup. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Flush all persistent state (keys, caches, lists). */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drops the reference taken by hci_register_dev(). */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1903
/* Suspend HCI device */
/* Called by drivers on suspend; just broadcasts the event to HCI
 * notifier consumers. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1911
/* Resume HCI device */
/* Counterpart of hci_suspend_dev(); broadcasts the resume event.
 * Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1919
/* Receive frame from HCI drivers */
/* Entry point for complete frames coming up from a transport driver.
 * The owning hci_dev is carried in skb->dev. The skb is queued for the
 * rx work item; ownership of the skb passes to the stack (it is freed
 * here on error). Returns 0 or -ENXIO if the device is not usable.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Accept frames only while the device is up or being initialized. */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1942
/* Incrementally reassemble one HCI packet of @type from a stream of
 * driver-supplied byte chunks. Partial packets are parked in
 * hdev->reassembly[index] between calls. Once the packet header has
 * been accumulated, the payload length is read from it and the rest of
 * the frame is collected; a complete frame is handed to
 * hci_recv_frame().
 *
 * Returns the number of unconsumed bytes from @data (>= 0), -EILSEQ on
 * an invalid type/index, or -ENOMEM on allocation failure (in which
 * case the partial packet is discarded).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* First chunk of a new packet: allocate a buffer big enough
		 * for the largest packet of this type and expect its header
		 * first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed before
		 * the next parsing step (header, then payload). */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* If the header just became complete, pull the payload
		 * length out of it and extend 'expect' accordingly. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2051
Marcel Holtmannef222012007-07-11 06:42:04 +02002052int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2053{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302054 int rem = 0;
2055
Marcel Holtmannef222012007-07-11 06:42:04 +02002056 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2057 return -EILSEQ;
2058
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002059 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002060 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302061 if (rem < 0)
2062 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002063
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302064 data += (count - rem);
2065 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002066 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002067
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302068 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002069}
2070EXPORT_SYMBOL(hci_recv_fragment);
2071
/* Dedicated reassembly slot for byte-stream transports (e.g. UART),
 * where the packet type indicator is part of the stream itself. */
#define STREAM_REASSEMBLY 0

/* Like hci_recv_fragment(), but for transports that deliver an
 * undifferentiated byte stream: the first byte of each packet is its
 * HCI packet type indicator. Returns leftover byte count or a negative
 * error from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			/* Start of the frame */
			/* No packet in progress: the next byte is the
			 * packet type indicator. */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107/* ---- Interface to upper protocols ---- */
2108
/* Register an upper-protocol callback structure on the global
 * hci_cb_list, protected by hci_cb_list_lock. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2120
/* Remove a callback structure previously added with hci_register_cb().
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2132
/* Hand one outgoing frame to the transport driver. Timestamps the skb,
 * mirrors it to the HCI monitor interface and (in promiscuous mode) to
 * raw sockets, then passes ownership to hdev->send(). Returns the
 * driver's result, or -ENODEV if the skb carries no device.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2160
/* Send HCI command */
/* Build an HCI command packet (header + @plen bytes of @param) and
 * queue it on cmd_q for the command work item to transmit. Returns 0
 * or -ENOMEM. @param may be NULL when @plen is 0.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During controller init, remember the last command issued so the
	 * init sequence can resume from it on command complete. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002196
2197/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002198void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199{
2200 struct hci_command_hdr *hdr;
2201
2202 if (!hdev->sent_cmd)
2203 return NULL;
2204
2205 hdr = (void *) hdev->sent_cmd->data;
2206
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002207 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208 return NULL;
2209
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002210 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211
2212 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2213}
2214
/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to @skb. The
 * push/reset ordering matters: the transport header must point at the
 * newly pushed bytes before they are written.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the header is added */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
2227
/* Queue an ACL skb (and any fragments hanging off its frag_list) onto
 * @queue. The first fragment keeps the caller's @flags; continuation
 * fragments are re-flagged ACL_CONT and each gets its own ACL header.
 * All fragments are enqueued under the queue lock so they stay
 * contiguous.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as an
		 * independent skb below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2268
/* Send ACL data on a channel: stamp the skb with the device, packet
 * type and ACL header, queue it on the channel's data queue and kick
 * the tx work item.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2285
/* Send SCO data */
/* Prepend a SCO header (connection handle + length) to @skb, queue it
 * on the connection's data queue and kick the tx work item.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	/* Make room for the header, then copy it into place. */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2308
2309/* ---- HCI TX task (outgoing data) ---- */
2310
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data that has the fewest
 * in-flight packets (fair round-robin), and compute its transmit quota
 * from the available controller buffer credits. Returns the chosen
 * connection (or NULL) and writes the quota through @quote.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;	/* min starts at "infinity" */

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type is seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available credits depend on link type; LE falls back to
		 * the ACL pool when the controller has no separate LE
		 * buffers (le_mtu == 0). */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the credits evenly; always grant at least one. */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2370
/* Transmit timeout handler: the controller stopped returning buffer
 * credits for links of @type. Disconnect every connection of that type
 * that still has unacknowledged packets (reason 0x13: remote user
 * terminated).
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2391
/* Channel-level scheduler: among all channels on connections of @type
 * with queued data, pick one whose head skb has the highest priority,
 * breaking ties by the owning connection's lowest in-flight count.
 * Computes a per-round quota from the controller credits, as in
 * hci_low_sent(). Returns the chosen channel or NULL.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Same credit pools as hci_low_sent(). */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2470
/* Anti-starvation pass over all links of @type, run after a TX round.
 *
 * Channels that transmitted during the round simply get their per-round
 * counter (chan->sent) cleared.  Channels that sent nothing but still
 * have queued data get their head skb promoted to HCI_PRIO_MAX - 1 so
 * they will win the next hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got its share this round: reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: boost its head skb */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
				skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2520
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002521static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2522{
2523 /* Calculate count of blocks used by this packet */
2524 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2525}
2526
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002527static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002528{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529 if (!test_bit(HCI_RAW, &hdev->flags)) {
2530 /* ACL tx timeout must be longer than maximum
2531 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002532 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenkocc48dc02012-01-04 16:42:26 +02002533 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002534 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002535 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002536}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002537
/* Transmit queued ACL data using packet-based flow control.
 *
 * Repeatedly asks hci_chan_sent() for the most deserving ACL channel
 * and drains up to its fair quote of packets, stopping a burst early if
 * a lower-priority skb reaches the head of the queue.  Each frame sent
 * consumes one acl_cnt credit and bumps the per-channel and
 * per-connection in-flight counters.
 */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Kill stalled links if we have been out of credits too long */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
				skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: let starved channels catch up next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2575
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002576static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2577{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002578 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002579 struct hci_chan *chan;
2580 struct sk_buff *skb;
2581 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002582
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002583 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002584
2585 while (hdev->block_cnt > 0 &&
2586 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2587 u32 priority = (skb_peek(&chan->data_q))->priority;
2588 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2589 int blocks;
2590
2591 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2592 skb->len, skb->priority);
2593
2594 /* Stop if priority has changed */
2595 if (skb->priority < priority)
2596 break;
2597
2598 skb = skb_dequeue(&chan->data_q);
2599
2600 blocks = __get_blocks(hdev, skb);
2601 if (blocks > hdev->block_cnt)
2602 return;
2603
2604 hci_conn_enter_active_mode(chan->conn,
2605 bt_cb(skb)->force_active);
2606
2607 hci_send_frame(skb);
2608 hdev->acl_last_tx = jiffies;
2609
2610 hdev->block_cnt -= blocks;
2611 quote -= blocks;
2612
2613 chan->sent += blocks;
2614 chan->conn->sent += blocks;
2615 }
2616 }
2617
2618 if (cnt != hdev->block_cnt)
2619 hci_prio_recalculate(hdev, ACL_LINK);
2620}
2621
2622static inline void hci_sched_acl(struct hci_dev *hdev)
2623{
2624 BT_DBG("%s", hdev->name);
2625
2626 if (!hci_conn_num(hdev, ACL_LINK))
2627 return;
2628
2629 switch (hdev->flow_ctl_mode) {
2630 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2631 hci_sched_acl_pkt(hdev);
2632 break;
2633
2634 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2635 hci_sched_acl_blk(hdev);
2636 break;
2637 }
2638}
2639
Linus Torvalds1da177e2005-04-16 15:20:36 -07002640/* Schedule SCO */
2641static inline void hci_sched_sco(struct hci_dev *hdev)
2642{
2643 struct hci_conn *conn;
2644 struct sk_buff *skb;
2645 int quote;
2646
2647 BT_DBG("%s", hdev->name);
2648
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002649 if (!hci_conn_num(hdev, SCO_LINK))
2650 return;
2651
Linus Torvalds1da177e2005-04-16 15:20:36 -07002652 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2653 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2654 BT_DBG("skb %p len %d", skb, skb->len);
2655 hci_send_frame(skb);
2656
2657 conn->sent++;
2658 if (conn->sent == ~0)
2659 conn->sent = 0;
2660 }
2661 }
2662}
2663
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002664static inline void hci_sched_esco(struct hci_dev *hdev)
2665{
2666 struct hci_conn *conn;
2667 struct sk_buff *skb;
2668 int quote;
2669
2670 BT_DBG("%s", hdev->name);
2671
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002672 if (!hci_conn_num(hdev, ESCO_LINK))
2673 return;
2674
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002675 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2676 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2677 BT_DBG("skb %p len %d", skb, skb->len);
2678 hci_send_frame(skb);
2679
2680 conn->sent++;
2681 if (conn->sent == ~0)
2682 conn->sent = 0;
2683 }
2684 }
2685}
2686
/* Transmit queued LE data.
 *
 * Uses the dedicated LE buffer credits when the controller advertises
 * them (le_pkts != 0), otherwise borrows from the ACL pool.  Otherwise
 * mirrors hci_sched_acl_pkt(): drain each channel hci_chan_sent()
 * selects up to its quote, respecting skb priority, then re-balance
 * priorities if anything was sent.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE buffers or shared ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting credits to detect progress */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
				skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2737
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002738static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002740 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741 struct sk_buff *skb;
2742
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002743 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2744 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745
2746 /* Schedule queues and send stuff to HCI driver */
2747
2748 hci_sched_acl(hdev);
2749
2750 hci_sched_sco(hdev);
2751
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002752 hci_sched_esco(hdev);
2753
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002754 hci_sched_le(hdev);
2755
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 /* Send next queued raw (unknown type) packet */
2757 while ((skb = skb_dequeue(&hdev->raw_q)))
2758 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759}
2760
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002761/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762
/* ACL data packet */
/* Deliver an inbound ACL data packet to L2CAP.
 *
 * Parses the ACL header for the connection handle and packet-boundary
 * flags, looks up the owning hci_conn, emits a one-time management
 * "device connected" event when HCI_MGMT is enabled, and hands the skb
 * to l2cap_recv_acldata() (which takes ownership).  Packets for unknown
 * handles are logged and freed here.
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	/* Lookup only needs the lock held briefly; conn remains usable
	 * after unlock for the duration of this handler */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* First data on this link: notify the management interface,
		 * guarded by HCI_CONN_MGMT_CONNECTED so it fires only once */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					conn->dst_type, 0, NULL, 0,
					conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
2805
2806/* SCO data packet */
2807static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2808{
2809 struct hci_sco_hdr *hdr = (void *) skb->data;
2810 struct hci_conn *conn;
2811 __u16 handle;
2812
2813 skb_pull(skb, HCI_SCO_HDR_SIZE);
2814
2815 handle = __le16_to_cpu(hdr->handle);
2816
2817 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2818
2819 hdev->stat.sco_rx++;
2820
2821 hci_dev_lock(hdev);
2822 conn = hci_conn_hash_lookup_handle(hdev, handle);
2823 hci_dev_unlock(hdev);
2824
2825 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002827 sco_recv_scodata(conn, skb);
2828 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002830 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002831 hdev->name, handle);
2832 }
2833
2834 kfree_skb(skb);
2835}
2836
/* RX work: drain hdev->rx_q and dispatch each inbound packet.
 *
 * Every packet is first mirrored to the monitor socket and, when the
 * device is in promiscuous mode, to raw HCI sockets.  In HCI_RAW mode
 * nothing further is processed; while HCI_INIT is set only event packets
 * are accepted.  Surviving packets are routed by bt_cb(skb)->pkt_type to
 * the event/ACL/SCO handlers, each of which consumes the skb.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space owns the device, drop everything */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2891
/* Command work: send the next queued HCI command when the controller
 * has a free command slot (cmd_cnt > 0).
 *
 * A clone of the frame is kept in hdev->sent_cmd so the reply event can
 * be matched to the command.  The cmd_timer watchdog is (re)armed unless
 * a controller reset is pending; if cloning fails, the command is put
 * back at the head of the queue and the work is rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous sent_cmd clone, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: retry this command later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002922
2923int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2924{
2925 /* General inquiry access code (GIAC) */
2926 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2927 struct hci_cp_inquiry cp;
2928
2929 BT_DBG("%s", hdev->name);
2930
2931 if (test_bit(HCI_INQUIRY, &hdev->flags))
2932 return -EINPROGRESS;
2933
Johan Hedberg46632622012-01-02 16:06:08 +02002934 inquiry_cache_flush(hdev);
2935
Andre Guedes2519a1f2011-11-07 11:45:24 -03002936 memset(&cp, 0, sizeof(cp));
2937 memcpy(&cp.lap, lap, sizeof(cp.lap));
2938 cp.length = length;
2939
2940 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2941}
Andre Guedes023d50492011-11-04 14:16:52 -03002942
2943int hci_cancel_inquiry(struct hci_dev *hdev)
2944{
2945 BT_DBG("%s", hdev->name);
2946
2947 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2948 return -EPERM;
2949
2950 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2951}