blob: e33af63a884a476e689af34a3a56003d2f485f2f [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
Johan Hedbergab81cbf2010-12-15 13:53:18 +020054#define AUTO_OFF_TIMEOUT 2000
55
Marcel Holtmannb78752c2010-08-08 23:06:53 -040056static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020057static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020058static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
Linus Torvalds1da177e2005-04-16 15:20:36 -070060/* HCI device list */
61LIST_HEAD(hci_dev_list);
62DEFINE_RWLOCK(hci_dev_list_lock);
63
64/* HCI callback list */
65LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock);
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Notify interested listeners of an HCI device event by forwarding it
 * to the HCI socket layer (hci_sock_dev_event).
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
Johan Hedberg23bb5762010-12-21 23:01:27 +020077void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070078{
Johan Hedberg23bb5762010-12-21 23:01:27 +020079 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
80
Johan Hedberga5040ef2011-01-10 13:28:59 +020081 /* If this is the init phase check if the completed command matches
82 * the last init command, and if not just return.
83 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020084 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
86 struct sk_buff *skb;
87
88 /* Some CSR based controllers generate a spontaneous
89 * reset complete event during init and any pending
90 * command will never be completed. In such a case we
91 * need to resend whatever was the last sent
92 * command.
93 */
94
95 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
96 return;
97
98 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
99 if (skb) {
100 skb_queue_head(&hdev->cmd_q, skb);
101 queue_work(hdev->workqueue, &hdev->cmd_work);
102 }
103
Johan Hedberg23bb5762010-12-21 23:01:27 +0200104 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +0200105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700106
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
111 }
112}
113
114static void hci_req_cancel(struct hci_dev *hdev, int err)
115{
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
122 }
123}
124
125/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Registers the caller on hdev->req_wait_q, runs the request callback
 * (which issues HCI commands), then sleeps up to @timeout jiffies until
 * hci_req_complete()/hci_req_cancel() changes hdev->req_status.
 *
 * Returns 0 on success, a negative errno on failure, -EINTR if a
 * signal interrupted the wait, or -ETIMEDOUT if no completion arrived.
 * Caller must hold hdev->req_lock (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves *before* issuing commands so a fast
	 * completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status code; map to errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno from hci_req_cancel. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
167
168static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100169 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700170{
171 int ret;
172
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200173 if (!test_bit(HCI_UP, &hdev->flags))
174 return -ENETDOWN;
175
Linus Torvalds1da177e2005-04-16 15:20:36 -0700176 /* Serialize all requests */
177 hci_req_lock(hdev);
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
180
181 return ret;
182}
183
/* Request callback that issues an HCI Reset. Sets HCI_RESET so the
 * event path knows a reset is in flight. @opt is unused (logged only).
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
192
/* Queue the initialization command sequence for a BR/EDR controller.
 * Commands are only queued here; completion is driven by the event
 * handlers while HCI_INIT is set (see hci_dev_open).
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all). */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
244
/* Queue the (much shorter) initialization sequence for an AMP
 * controller: reset plus version read. AMP controllers use
 * block-based flow control.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
255
/* Request callback driving controller bring-up: first flushes any
 * driver-supplied "special" init commands into the command queue,
 * then dispatches to the type-specific init sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: move each driver_init skb onto the command
	 * queue, tagging it as an HCI command packet. Ownership of the
	 * skb transfers to cmd_q here.
	 */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
289
/* Request callback for LE-specific init: query the LE buffer size.
 * @opt is unused.
 */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
297
Linus Torvalds1da177e2005-04-16 15:20:36 -0700298static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
299{
300 __u8 scan = opt;
301
302 BT_DBG("%s %x", hdev->name, scan);
303
304 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200305 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700306}
307
308static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
309{
310 __u8 auth = opt;
311
312 BT_DBG("%s %x", hdev->name, auth);
313
314 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200315 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700316}
317
318static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
319{
320 __u8 encrypt = opt;
321
322 BT_DBG("%s %x", hdev->name, encrypt);
323
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200324 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200325 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700326}
327
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200328static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
329{
330 __le16 policy = cpu_to_le16(opt);
331
Marcel Holtmanna418b892008-11-30 12:17:28 +0100332 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200333
334 /* Default link policy */
335 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
336}
337
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900338/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700339 * Device is held on return. */
340struct hci_dev *hci_dev_get(int index)
341{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200342 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343
344 BT_DBG("%d", index);
345
346 if (index < 0)
347 return NULL;
348
349 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200350 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700351 if (d->id == index) {
352 hdev = hci_dev_hold(d);
353 break;
354 }
355 }
356 read_unlock(&hci_dev_list_lock);
357 return hdev;
358}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359
360/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200361
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200362bool hci_discovery_active(struct hci_dev *hdev)
363{
364 struct discovery_state *discov = &hdev->discovery;
365
Andre Guedes6fbe1952012-02-03 17:47:58 -0300366 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300367 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300368 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200369 return true;
370
Andre Guedes6fbe1952012-02-03 17:47:58 -0300371 default:
372 return false;
373 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200374}
375
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events. No-op if the state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so no "stopped discovering" event is sent.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		hdev->discovery.type = 0;
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		/* Discovery genuinely underway: notify userspace. */
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
402
Linus Torvalds1da177e2005-04-16 15:20:36 -0700403static void inquiry_cache_flush(struct hci_dev *hdev)
404{
Johan Hedberg30883512012-01-04 14:16:21 +0200405 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200406 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407
Johan Hedberg561aafb2012-01-04 13:31:59 +0200408 list_for_each_entry_safe(p, n, &cache->all, all) {
409 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200410 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200412
413 INIT_LIST_HEAD(&cache->unknown);
414 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700415}
416
417struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
418{
Johan Hedberg30883512012-01-04 14:16:21 +0200419 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420 struct inquiry_entry *e;
421
422 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
423
Johan Hedberg561aafb2012-01-04 13:31:59 +0200424 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700425 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200426 return e;
427 }
428
429 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700430}
431
Johan Hedberg561aafb2012-01-04 13:31:59 +0200432struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300433 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200434{
Johan Hedberg30883512012-01-04 14:16:21 +0200435 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200436 struct inquiry_entry *e;
437
438 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
439
440 list_for_each_entry(e, &cache->unknown, list) {
441 if (!bacmp(&e->data.bdaddr, bdaddr))
442 return e;
443 }
444
445 return NULL;
446}
447
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200448struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300449 bdaddr_t *bdaddr,
450 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200451{
452 struct discovery_state *cache = &hdev->discovery;
453 struct inquiry_entry *e;
454
455 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
456
457 list_for_each_entry(e, &cache->resolve, list) {
458 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
459 return e;
460 if (!bacmp(&e->data.bdaddr, bdaddr))
461 return e;
462 }
463
464 return NULL;
465}
466
/* Re-insert @ie into the resolve list keeping it sorted so that
 * entries with stronger signal (smaller |rssi|) come first; entries
 * already in NAME_PENDING keep their position ahead of @ie.
 * Caller must hold hdev->lock.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; we re-add at the computed position below. */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		/* Stop at the first non-pending entry that is no
		 * closer than @ie; @ie goes right before it.
		 */
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
485
/* Insert or refresh an inquiry-cache entry for the device described
 * by @data.
 *
 * @name_known: caller already knows the remote name (no resolve needed)
 * @ssp: out parameter (may be NULL); set to the device's SSP mode,
 *       forced true if a cached entry already reported SSP support
 *
 * Returns true if the entry's name is (now) known or pending, false
 * if the name is still unknown or allocation failed. Caller must hold
 * hdev->lock.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* A previous report with SSP set wins over this one. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Keep the resolve list ordered if the RSSI changed
		 * for an entry awaiting name resolution.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever
	 * auxiliary list (unknown/resolve) it was on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
541
542static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
543{
Johan Hedberg30883512012-01-04 14:16:21 +0200544 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700545 struct inquiry_info *info = (struct inquiry_info *) buf;
546 struct inquiry_entry *e;
547 int copied = 0;
548
Johan Hedberg561aafb2012-01-04 13:31:59 +0200549 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200551
552 if (copied >= num)
553 break;
554
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555 bacpy(&info->bdaddr, &data->bdaddr);
556 info->pscan_rep_mode = data->pscan_rep_mode;
557 info->pscan_period_mode = data->pscan_period_mode;
558 info->pscan_mode = data->pscan_mode;
559 memcpy(info->dev_class, data->dev_class, 3);
560 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200561
Linus Torvalds1da177e2005-04-16 15:20:36 -0700562 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200563 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700564 }
565
566 BT_DBG("cache %p, copied %d", cache, copied);
567 return copied;
568}
569
570static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
571{
572 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
573 struct hci_cp_inquiry cp;
574
575 BT_DBG("%s", hdev->name);
576
577 if (test_bit(HCI_INQUIRY, &hdev->flags))
578 return;
579
580 /* Start Inquiry */
581 memcpy(&cp.lap, &ir->lap, 3);
582 cp.length = ir->length;
583 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200584 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585}
586
/* ioctl helper (HCIINQUIRY): run an inquiry if the cache is stale,
 * empty, or the caller asked for a flush, then copy the resulting
 * struct hci_inquiry_req plus inquiry_info array back to userspace.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the lock whether a fresh inquiry is needed. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) ||
	    ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units per spec; ~2000ms per unit here. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy the header first, then the entries right after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
652
653/* ---- HCI ioctl helpers ---- */
654
/* ioctl helper (HCIDEVUP): power on and initialize the device with
 * index @dev. Returns 0 on success; -ENODEV, -ERFKILL, -EALREADY,
 * -EIO or an init-request error on failure. On init failure the
 * transport is closed again and all queues purged.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Respect rfkill: refuse to power a blocked radio. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the transport via the driver callback. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip the HCI init command sequence entirely. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't announce power-on to mgmt while still in
		 * the controller setup phase.
		 */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
741
/* Power down @hdev: cancel outstanding work and timers, flush all
 * queues and the connection/inquiry state, optionally reset the
 * controller, and close the transport. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	/* Abort any in-flight synchronous request, then take the
	 * request lock so nothing new starts while we tear down.
	 */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	/* If open() won't reset the controller (NO_RESET quirk),
	 * reset it here on close instead.
	 */
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* An auto-off close was already announced via mgmt by the
	 * power_off path; only notify for explicit power-downs.
	 */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
829
830int hci_dev_close(__u16 dev)
831{
832 struct hci_dev *hdev;
833 int err;
834
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200835 hdev = hci_dev_get(dev);
836 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100838
839 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
840 cancel_delayed_work(&hdev->power_off);
841
Linus Torvalds1da177e2005-04-16 15:20:36 -0700842 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100843
Linus Torvalds1da177e2005-04-16 15:20:36 -0700844 hci_dev_put(hdev);
845 return err;
846}
847
848int hci_dev_reset(__u16 dev)
849{
850 struct hci_dev *hdev;
851 int ret = 0;
852
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200853 hdev = hci_dev_get(dev);
854 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700855 return -ENODEV;
856
857 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700858
859 if (!test_bit(HCI_UP, &hdev->flags))
860 goto done;
861
862 /* Drop queues */
863 skb_queue_purge(&hdev->rx_q);
864 skb_queue_purge(&hdev->cmd_q);
865
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300866 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867 inquiry_cache_flush(hdev);
868 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300869 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700870
871 if (hdev->flush)
872 hdev->flush(hdev);
873
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900874 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300875 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700876
877 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200878 ret = __hci_request(hdev, hci_reset_req, 0,
879 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700880
881done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700882 hci_req_unlock(hdev);
883 hci_dev_put(hdev);
884 return ret;
885}
886
887int hci_dev_reset_stat(__u16 dev)
888{
889 struct hci_dev *hdev;
890 int ret = 0;
891
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200892 hdev = hci_dev_get(dev);
893 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700894 return -ENODEV;
895
896 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
897
898 hci_dev_put(hdev);
899
900 return ret;
901}
902
903int hci_dev_cmd(unsigned int cmd, void __user *arg)
904{
905 struct hci_dev *hdev;
906 struct hci_dev_req dr;
907 int err = 0;
908
909 if (copy_from_user(&dr, arg, sizeof(dr)))
910 return -EFAULT;
911
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200912 hdev = hci_dev_get(dr.dev_id);
913 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914 return -ENODEV;
915
916 switch (cmd) {
917 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200918 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
919 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920 break;
921
922 case HCISETENCRYPT:
923 if (!lmp_encrypt_capable(hdev)) {
924 err = -EOPNOTSUPP;
925 break;
926 }
927
928 if (!test_bit(HCI_AUTH, &hdev->flags)) {
929 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200930 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
931 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700932 if (err)
933 break;
934 }
935
Marcel Holtmann04837f62006-07-03 10:02:33 +0200936 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
937 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700938 break;
939
940 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200941 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
942 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700943 break;
944
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200945 case HCISETLINKPOL:
946 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
947 msecs_to_jiffies(HCI_INIT_TIMEOUT));
948 break;
949
950 case HCISETLINKMODE:
951 hdev->link_mode = ((__u16) dr.dev_opt) &
952 (HCI_LM_MASTER | HCI_LM_ACCEPT);
953 break;
954
Linus Torvalds1da177e2005-04-16 15:20:36 -0700955 case HCISETPTYPE:
956 hdev->pkt_type = (__u16) dr.dev_opt;
957 break;
958
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200960 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
961 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700962 break;
963
964 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200965 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
966 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967 break;
968
969 default:
970 err = -EINVAL;
971 break;
972 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200973
Linus Torvalds1da177e2005-04-16 15:20:36 -0700974 hci_dev_put(hdev);
975 return err;
976}
977
978int hci_get_dev_list(void __user *arg)
979{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200980 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700981 struct hci_dev_list_req *dl;
982 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700983 int n = 0, size, err;
984 __u16 dev_num;
985
986 if (get_user(dev_num, (__u16 __user *) arg))
987 return -EFAULT;
988
989 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
990 return -EINVAL;
991
992 size = sizeof(*dl) + dev_num * sizeof(*dr);
993
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200994 dl = kzalloc(size, GFP_KERNEL);
995 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700996 return -ENOMEM;
997
998 dr = dl->dev_req;
999
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001000 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001001 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001002 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001003 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001004
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001005 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1006 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001007
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 (dr + n)->dev_id = hdev->id;
1009 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001010
Linus Torvalds1da177e2005-04-16 15:20:36 -07001011 if (++n >= dev_num)
1012 break;
1013 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001014 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001015
1016 dl->dev_num = n;
1017 size = sizeof(*dl) + n * sizeof(*dr);
1018
1019 err = copy_to_user(arg, dl, size);
1020 kfree(dl);
1021
1022 return err ? -EFAULT : 0;
1023}
1024
1025int hci_get_dev_info(void __user *arg)
1026{
1027 struct hci_dev *hdev;
1028 struct hci_dev_info di;
1029 int err = 0;
1030
1031 if (copy_from_user(&di, arg, sizeof(di)))
1032 return -EFAULT;
1033
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001034 hdev = hci_dev_get(di.dev_id);
1035 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001036 return -ENODEV;
1037
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001038 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001039 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001040
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001041 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1042 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001043
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044 strcpy(di.name, hdev->name);
1045 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001046 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001047 di.flags = hdev->flags;
1048 di.pkt_type = hdev->pkt_type;
1049 di.acl_mtu = hdev->acl_mtu;
1050 di.acl_pkts = hdev->acl_pkts;
1051 di.sco_mtu = hdev->sco_mtu;
1052 di.sco_pkts = hdev->sco_pkts;
1053 di.link_policy = hdev->link_policy;
1054 di.link_mode = hdev->link_mode;
1055
1056 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1057 memcpy(&di.features, &hdev->features, sizeof(di.features));
1058
1059 if (copy_to_user(arg, &di, sizeof(di)))
1060 err = -EFAULT;
1061
1062 hci_dev_put(hdev);
1063
1064 return err;
1065}
1066
1067/* ---- Interface to HCI drivers ---- */
1068
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001069static int hci_rfkill_set_block(void *data, bool blocked)
1070{
1071 struct hci_dev *hdev = data;
1072
1073 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1074
1075 if (!blocked)
1076 return 0;
1077
1078 hci_dev_do_close(hdev);
1079
1080 return 0;
1081}
1082
/* rfkill operations table: only the block transition is acted upon
 * (see hci_rfkill_set_block above). */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1086
Linus Torvalds1da177e2005-04-16 15:20:36 -07001087/* Alloc HCI device */
1088struct hci_dev *hci_alloc_dev(void)
1089{
1090 struct hci_dev *hdev;
1091
Marcel Holtmann25ea6db2006-07-06 15:40:09 +02001092 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 if (!hdev)
1094 return NULL;
1095
David Herrmann0ac7e702011-10-08 14:58:47 +02001096 hci_init_sysfs(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001097 skb_queue_head_init(&hdev->driver_init);
1098
1099 return hdev;
1100}
1101EXPORT_SYMBOL(hci_alloc_dev);
1102
1103/* Free HCI device */
1104void hci_free_dev(struct hci_dev *hdev)
1105{
1106 skb_queue_purge(&hdev->driver_init);
1107
Marcel Holtmanna91f2e32006-07-03 10:02:41 +02001108 /* will free via device release */
1109 put_device(&hdev->dev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001110}
1111EXPORT_SYMBOL(hci_free_dev);
1112
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001113static void hci_power_on(struct work_struct *work)
1114{
1115 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1116
1117 BT_DBG("%s", hdev->name);
1118
1119 if (hci_dev_open(hdev->id) < 0)
1120 return;
1121
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001122 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Gustavo F. Padovan80b7ab32011-12-17 14:52:27 -02001123 schedule_delayed_work(&hdev->power_off,
Johan Hedberg32435532011-11-07 22:16:04 +02001124 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001125
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001126 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001127 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001128}
1129
1130static void hci_power_off(struct work_struct *work)
1131{
Johan Hedberg32435532011-11-07 22:16:04 +02001132 struct hci_dev *hdev = container_of(work, struct hci_dev,
1133 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001134
1135 BT_DBG("%s", hdev->name);
1136
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001137 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001138}
1139
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001140static void hci_discov_off(struct work_struct *work)
1141{
1142 struct hci_dev *hdev;
1143 u8 scan = SCAN_PAGE;
1144
1145 hdev = container_of(work, struct hci_dev, discov_off.work);
1146
1147 BT_DBG("%s", hdev->name);
1148
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001149 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001150
1151 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1152
1153 hdev->discov_timeout = 0;
1154
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001155 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001156}
1157
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001158int hci_uuids_clear(struct hci_dev *hdev)
1159{
1160 struct list_head *p, *n;
1161
1162 list_for_each_safe(p, n, &hdev->uuids) {
1163 struct bt_uuid *uuid;
1164
1165 uuid = list_entry(p, struct bt_uuid, list);
1166
1167 list_del(p);
1168 kfree(uuid);
1169 }
1170
1171 return 0;
1172}
1173
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001174int hci_link_keys_clear(struct hci_dev *hdev)
1175{
1176 struct list_head *p, *n;
1177
1178 list_for_each_safe(p, n, &hdev->link_keys) {
1179 struct link_key *key;
1180
1181 key = list_entry(p, struct link_key, list);
1182
1183 list_del(p);
1184 kfree(key);
1185 }
1186
1187 return 0;
1188}
1189
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001190int hci_smp_ltks_clear(struct hci_dev *hdev)
1191{
1192 struct smp_ltk *k, *tmp;
1193
1194 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1195 list_del(&k->list);
1196 kfree(k);
1197 }
1198
1199 return 0;
1200}
1201
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001202struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1203{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001204 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001205
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001206 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001207 if (bacmp(bdaddr, &k->bdaddr) == 0)
1208 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001209
1210 return NULL;
1211}
1212
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001213static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1214 u8 key_type, u8 old_key_type)
1215{
1216 /* Legacy key */
1217 if (key_type < 0x03)
1218 return 1;
1219
1220 /* Debug keys are insecure so don't store them persistently */
1221 if (key_type == HCI_LK_DEBUG_COMBINATION)
1222 return 0;
1223
1224 /* Changed combination key and there's no previous one */
1225 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1226 return 0;
1227
1228 /* Security mode 3 case */
1229 if (!conn)
1230 return 1;
1231
1232 /* Neither local nor remote side had no-bonding as requirement */
1233 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1234 return 1;
1235
1236 /* Local side had dedicated bonding as requirement */
1237 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1238 return 1;
1239
1240 /* Remote side had dedicated bonding as requirement */
1241 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1242 return 1;
1243
1244 /* If none of the above criteria match, then don't store the key
1245 * persistently */
1246 return 0;
1247}
1248
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001249struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001250{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001251 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001252
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001253 list_for_each_entry(k, &hdev->long_term_keys, list) {
1254 if (k->ediv != ediv ||
1255 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001256 continue;
1257
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001258 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001259 }
1260
1261 return NULL;
1262}
1263EXPORT_SYMBOL(hci_find_ltk);
1264
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001265struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001266 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001267{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001268 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001269
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001270 list_for_each_entry(k, &hdev->long_term_keys, list)
1271 if (addr_type == k->bdaddr_type &&
1272 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001273 return k;
1274
1275 return NULL;
1276}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001277EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001278
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001279int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001280 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001281{
1282 struct link_key *key, *old_key;
Johan Hedberg4df378a2011-04-28 11:29:03 -07001283 u8 old_key_type, persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001284
1285 old_key = hci_find_link_key(hdev, bdaddr);
1286 if (old_key) {
1287 old_key_type = old_key->type;
1288 key = old_key;
1289 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001290 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001291 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1292 if (!key)
1293 return -ENOMEM;
1294 list_add(&key->list, &hdev->link_keys);
1295 }
1296
1297 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1298
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001299 /* Some buggy controller combinations generate a changed
1300 * combination key for legacy pairing even when there's no
1301 * previous key */
1302 if (type == HCI_LK_CHANGED_COMBINATION &&
1303 (!conn || conn->remote_auth == 0xff) &&
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001304 old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001305 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001306 if (conn)
1307 conn->key_type = type;
1308 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001309
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001310 bacpy(&key->bdaddr, bdaddr);
1311 memcpy(key->val, val, 16);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001312 key->pin_len = pin_len;
1313
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001314 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001315 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001316 else
1317 key->type = type;
1318
Johan Hedberg4df378a2011-04-28 11:29:03 -07001319 if (!new_key)
1320 return 0;
1321
1322 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1323
Johan Hedberg744cf192011-11-08 20:40:14 +02001324 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001325
1326 if (!persistent) {
1327 list_del(&key->list);
1328 kfree(key);
1329 }
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001330
1331 return 0;
1332}
1333
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001334int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001335 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
1336 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001337{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001338 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001339
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001340 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1341 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001342
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001343 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1344 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001345 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001346 else {
1347 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001348 if (!key)
1349 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001350 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001351 }
1352
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001353 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001354 key->bdaddr_type = addr_type;
1355 memcpy(key->val, tk, sizeof(key->val));
1356 key->authenticated = authenticated;
1357 key->ediv = ediv;
1358 key->enc_size = enc_size;
1359 key->type = type;
1360 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001361
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001362 if (!new_key)
1363 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001364
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001365 if (type & HCI_SMP_LTK)
1366 mgmt_new_ltk(hdev, key, 1);
1367
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001368 return 0;
1369}
1370
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001371int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1372{
1373 struct link_key *key;
1374
1375 key = hci_find_link_key(hdev, bdaddr);
1376 if (!key)
1377 return -ENOENT;
1378
1379 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1380
1381 list_del(&key->list);
1382 kfree(key);
1383
1384 return 0;
1385}
1386
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001387int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1388{
1389 struct smp_ltk *k, *tmp;
1390
1391 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1392 if (bacmp(bdaddr, &k->bdaddr))
1393 continue;
1394
1395 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1396
1397 list_del(&k->list);
1398 kfree(k);
1399 }
1400
1401 return 0;
1402}
1403
Ville Tervo6bd32322011-02-16 16:32:41 +02001404/* HCI command timer function */
1405static void hci_cmd_timer(unsigned long arg)
1406{
1407 struct hci_dev *hdev = (void *) arg;
1408
1409 BT_ERR("%s command tx timeout", hdev->name);
1410 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001411 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001412}
1413
Szymon Janc2763eda2011-03-22 13:12:22 +01001414struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001415 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001416{
1417 struct oob_data *data;
1418
1419 list_for_each_entry(data, &hdev->remote_oob_data, list)
1420 if (bacmp(bdaddr, &data->bdaddr) == 0)
1421 return data;
1422
1423 return NULL;
1424}
1425
1426int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1427{
1428 struct oob_data *data;
1429
1430 data = hci_find_remote_oob_data(hdev, bdaddr);
1431 if (!data)
1432 return -ENOENT;
1433
1434 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1435
1436 list_del(&data->list);
1437 kfree(data);
1438
1439 return 0;
1440}
1441
1442int hci_remote_oob_data_clear(struct hci_dev *hdev)
1443{
1444 struct oob_data *data, *n;
1445
1446 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1447 list_del(&data->list);
1448 kfree(data);
1449 }
1450
1451 return 0;
1452}
1453
1454int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001455 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001456{
1457 struct oob_data *data;
1458
1459 data = hci_find_remote_oob_data(hdev, bdaddr);
1460
1461 if (!data) {
1462 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1463 if (!data)
1464 return -ENOMEM;
1465
1466 bacpy(&data->bdaddr, bdaddr);
1467 list_add(&data->list, &hdev->remote_oob_data);
1468 }
1469
1470 memcpy(data->hash, hash, sizeof(data->hash));
1471 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1472
1473 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1474
1475 return 0;
1476}
1477
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001478struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001479{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001480 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001481
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001482 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001483 if (bacmp(bdaddr, &b->bdaddr) == 0)
1484 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001485
1486 return NULL;
1487}
1488
1489int hci_blacklist_clear(struct hci_dev *hdev)
1490{
1491 struct list_head *p, *n;
1492
1493 list_for_each_safe(p, n, &hdev->blacklist) {
1494 struct bdaddr_list *b;
1495
1496 b = list_entry(p, struct bdaddr_list, list);
1497
1498 list_del(p);
1499 kfree(b);
1500 }
1501
1502 return 0;
1503}
1504
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001505int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001506{
1507 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001508
1509 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1510 return -EBADF;
1511
Antti Julku5e762442011-08-25 16:48:02 +03001512 if (hci_blacklist_lookup(hdev, bdaddr))
1513 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001514
1515 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001516 if (!entry)
1517 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001518
1519 bacpy(&entry->bdaddr, bdaddr);
1520
1521 list_add(&entry->list, &hdev->blacklist);
1522
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001523 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001524}
1525
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001526int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001527{
1528 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001529
Szymon Janc1ec918c2011-11-16 09:32:21 +01001530 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001531 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001532
1533 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001534 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001535 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001536
1537 list_del(&entry->list);
1538 kfree(entry);
1539
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001540 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001541}
1542
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001543static void hci_clear_adv_cache(struct work_struct *work)
Andre Guedes35815082011-05-26 16:23:53 -03001544{
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001545 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001546 adv_work.work);
Andre Guedes35815082011-05-26 16:23:53 -03001547
1548 hci_dev_lock(hdev);
1549
1550 hci_adv_entries_clear(hdev);
1551
1552 hci_dev_unlock(hdev);
1553}
1554
Andre Guedes76c86862011-05-26 16:23:50 -03001555int hci_adv_entries_clear(struct hci_dev *hdev)
1556{
1557 struct adv_entry *entry, *tmp;
1558
1559 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1560 list_del(&entry->list);
1561 kfree(entry);
1562 }
1563
1564 BT_DBG("%s adv cache cleared", hdev->name);
1565
1566 return 0;
1567}
1568
1569struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1570{
1571 struct adv_entry *entry;
1572
1573 list_for_each_entry(entry, &hdev->adv_entries, list)
1574 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1575 return entry;
1576
1577 return NULL;
1578}
1579
1580static inline int is_connectable_adv(u8 evt_type)
1581{
1582 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1583 return 1;
1584
1585 return 0;
1586}
1587
1588int hci_add_adv_entry(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001589 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
Andre Guedes76c86862011-05-26 16:23:50 -03001590 return -EINVAL;
1591
1592 /* Only new entries should be added to adv_entries. So, if
1593 * bdaddr was found, don't add it. */
1594 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1595 return 0;
1596
Andre Guedes4777bfd2012-01-30 23:31:28 -03001597 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Andre Guedes76c86862011-05-26 16:23:50 -03001598 if (!entry)
1599 return -ENOMEM;
1600
1601 bacpy(&entry->bdaddr, &ev->bdaddr);
1602 entry->bdaddr_type = ev->bdaddr_type;
1603
1604 list_add(&entry->list, &hdev->adv_entries);
1605
1606 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1607 batostr(&entry->bdaddr), entry->bdaddr_type);
1608
1609 return 0;
1610}
1611
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001612static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1613{
1614 struct le_scan_params *param = (struct le_scan_params *) opt;
1615 struct hci_cp_le_set_scan_param cp;
1616
1617 memset(&cp, 0, sizeof(cp));
1618 cp.type = param->type;
1619 cp.interval = cpu_to_le16(param->interval);
1620 cp.window = cpu_to_le16(param->window);
1621
1622 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1623}
1624
1625static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1626{
1627 struct hci_cp_le_set_scan_enable cp;
1628
1629 memset(&cp, 0, sizeof(cp));
1630 cp.enable = 1;
1631
1632 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1633}
1634
1635static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001636 u16 window, int timeout)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001637{
1638 long timeo = msecs_to_jiffies(3000);
1639 struct le_scan_params param;
1640 int err;
1641
1642 BT_DBG("%s", hdev->name);
1643
1644 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1645 return -EINPROGRESS;
1646
1647 param.type = type;
1648 param.interval = interval;
1649 param.window = window;
1650
1651 hci_req_lock(hdev);
1652
1653 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001654 timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001655 if (!err)
1656 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1657
1658 hci_req_unlock(hdev);
1659
1660 if (err < 0)
1661 return err;
1662
1663 schedule_delayed_work(&hdev->le_scan_disable,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001664 msecs_to_jiffies(timeout));
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001665
1666 return 0;
1667}
1668
1669static void le_scan_disable_work(struct work_struct *work)
1670{
1671 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001672 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001673 struct hci_cp_le_set_scan_enable cp;
1674
1675 BT_DBG("%s", hdev->name);
1676
1677 memset(&cp, 0, sizeof(cp));
1678
1679 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1680}
1681
Andre Guedes28b75a82012-02-03 17:48:00 -03001682static void le_scan_work(struct work_struct *work)
1683{
1684 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1685 struct le_scan_params *param = &hdev->le_scan_params;
1686
1687 BT_DBG("%s", hdev->name);
1688
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001689 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1690 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001691}
1692
1693int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001694 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001695{
1696 struct le_scan_params *param = &hdev->le_scan_params;
1697
1698 BT_DBG("%s", hdev->name);
1699
1700 if (work_busy(&hdev->le_scan))
1701 return -EINPROGRESS;
1702
1703 param->type = type;
1704 param->interval = interval;
1705 param->window = window;
1706 param->timeout = timeout;
1707
1708 queue_work(system_long_wq, &hdev->le_scan);
1709
1710 return 0;
1711}
1712
/* Register HCI device.
 *
 * Allocates the first free device id (AMP controllers start at 1 so the
 * id can double as the AMP controller ID), initializes all per-device
 * state, creates the workqueue and sysfs entries, and schedules the
 * initial power-on.  Returns the assigned id, or a negative error; on
 * failure the device is removed from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* The driver must supply at least open and close callbacks. */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id.  The list is kept sorted by id;
	 * walk it until a gap is found, tracking the insertion point. */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default controller state before any init commands run. */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* RX/CMD/TX processing all runs from work items. */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Per-device ordered workqueue for rx/tx/cmd processing. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: failure leaves rfkill NULL
	 * and the device still registers. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above under the same lock. */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1844
/* Unregister HCI device.
 *
 * Removes the device from the global list, shuts it down, notifies mgmt
 * (unless still in init/setup), tears down sysfs/rfkill/workqueue and
 * clears all persistent per-device data before dropping the reference
 * taken at registration time.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt once the device finished setup. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Make sure no advertising-cache work is still running. */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Drop all stored keys, blacklist entries and caches. */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1897
1898/* Suspend HCI device */
1899int hci_suspend_dev(struct hci_dev *hdev)
1900{
1901 hci_notify(hdev, HCI_DEV_SUSPEND);
1902 return 0;
1903}
1904EXPORT_SYMBOL(hci_suspend_dev);
1905
1906/* Resume HCI device */
1907int hci_resume_dev(struct hci_dev *hdev)
1908{
1909 hci_notify(hdev, HCI_DEV_RESUME);
1910 return 0;
1911}
1912EXPORT_SYMBOL(hci_resume_dev);
1913
/* Receive frame from HCI drivers.
 *
 * Marks the skb as incoming, timestamps it and hands it to the rx work
 * item so processing happens off the driver context.  Frames are dropped
 * with -ENXIO when the device is missing or neither up nor initializing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue for the per-device rx worker. */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1936
/* Core packet reassembly: append up to @count bytes of driver data to
 * the partially built packet in hdev->reassembly[index], allocating the
 * skb on first use.  When a complete HCI event/ACL/SCO frame has been
 * assembled it is handed to hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (callers loop until 0),
 * -EILSEQ for a bad type/index, or -ENOMEM on allocation failure or when
 * the advertised payload would not fit the preallocated skb.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate an skb large enough for
		 * the biggest frame of this type and prime the expected
		 * byte counter with the header size. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy no more than the bytes still expected for the
		 * current header or payload. */
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the full header has arrived, read the payload
		 * length out of it; give up if it cannot fit. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2045
Marcel Holtmannef222012007-07-11 06:42:04 +02002046int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2047{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302048 int rem = 0;
2049
Marcel Holtmannef222012007-07-11 06:42:04 +02002050 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2051 return -EILSEQ;
2052
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002053 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002054 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302055 if (rem < 0)
2056 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002057
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302058 data += (count - rem);
2059 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002060 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002061
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302062 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002063}
2064EXPORT_SYMBOL(hci_recv_fragment);
2065
#define STREAM_REASSEMBLY 0

/* Reassemble from a raw byte stream (H4-style transports): the first
 * byte of every frame carries the packet-type indicator, the rest is fed
 * into the shared stream reassembly slot.  Returns the number of bytes
 * left unconsumed, or a negative error from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2100
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101/* ---- Interface to upper protocols ---- */
2102
/* Register an upper-protocol callback set on the global hci_cb_list,
 * protected by hci_cb_list_lock.  Always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2114
/* Remove a previously registered callback set from the global list,
 * under the same hci_cb_list_lock.  Always succeeds. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2126
/* Hand one outgoing skb to the driver, after timestamping it and giving
 * the monitor (and, in promiscuous mode, the raw sockets) a copy. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2154
/* Send HCI command.
 *
 * Builds a command skb (header + @plen parameter bytes from @param),
 * queues it on cmd_q and kicks the cmd worker.  Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header followed by the raw parameter block. */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190
2191/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002192void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193{
2194 struct hci_command_hdr *hdr;
2195
2196 if (!hdev->sent_cmd)
2197 return NULL;
2198
2199 hdr = (void *) hdev->sent_cmd->data;
2200
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002201 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202 return NULL;
2203
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002204 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002205
2206 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2207}
2208
2209/* Send ACL data */
2210static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2211{
2212 struct hci_acl_hdr *hdr;
2213 int len = skb->len;
2214
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002215 skb_push(skb, HCI_ACL_HDR_SIZE);
2216 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002217 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002218 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2219 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220}
2221
/* Queue one (possibly fragmented) ACL packet on @queue.
 *
 * A plain skb is appended directly.  For an skb with a frag_list, the
 * head and all fragments are queued atomically under the queue lock;
 * each continuation fragment gets its own ACL header with ACL_START
 * cleared and ACL_CONT set.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach fragments; each is queued as its own packet. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuations carry ACL_CONT instead of ACL_START. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2262
2263void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2264{
2265 struct hci_conn *conn = chan->conn;
2266 struct hci_dev *hdev = conn->hdev;
2267
2268 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2269
2270 skb->dev = (void *) hdev;
2271 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2272 hci_add_acl_hdr(skb, conn->handle, flags);
2273
2274 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002275
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002276 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277}
2278EXPORT_SYMBOL(hci_send_acl);
2279
2280/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002281void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282{
2283 struct hci_dev *hdev = conn->hdev;
2284 struct hci_sco_hdr hdr;
2285
2286 BT_DBG("%s len %d", hdev->name, skb->len);
2287
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002288 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002289 hdr.dlen = skb->len;
2290
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002291 skb_push(skb, HCI_SCO_HDR_SIZE);
2292 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002293 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294
2295 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002296 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002297
Linus Torvalds1da177e2005-04-16 15:20:36 -07002298 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002299 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300}
2301EXPORT_SYMBOL(hci_send_sco);
2302
2303/* ---- HCI TX task (outgoing data) ---- */
2304
/* HCI Connection scheduler */

/* Pick the connection of @type with pending data and the fewest unacked
 * packets (least-sent), and compute in *quote its fair share of the
 * controller buffer count for that link type.  Returns NULL (and quote
 * 0) when nothing of this type has data queued. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-sent candidate. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type is seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer budget depends on the link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share among contenders, at least one packet. */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2364
Ville Tervobae1f5d92011-02-10 22:38:53 -03002365static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002366{
2367 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002368 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002369
Ville Tervobae1f5d92011-02-10 22:38:53 -03002370 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002371
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002372 rcu_read_lock();
2373
Linus Torvalds1da177e2005-04-16 15:20:36 -07002374 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002375 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002376 if (c->type == type && c->sent) {
2377 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378 hdev->name, batostr(&c->dst));
2379 hci_acl_disconn(c, 0x13);
2380 }
2381 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002382
2383 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002384}
2385
/* Channel scheduler: across all connections of @type, pick the channel
 * whose head skb has the highest priority, breaking ties in favour of
 * the connection with the fewest unacked packets.  *quote receives the
 * channel's fair share of the link-type buffer count.  Returns NULL
 * when no channel of this type has data queued. */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the
			 * least-sent competition at that level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of @type were seen. */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer budget depends on the winning channel's link type. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share among same-priority contenders, at least one. */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2464
/* Priority aging for @type links: any channel that sent nothing in the
 * last scheduling round gets the head skb of its queue promoted to
 * HCI_PRIO_MAX - 1, so lower-priority traffic cannot be starved forever.
 * Channels that did make progress just have their sent counter reset. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel made progress: reset and skip it. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Stop early once all connections of @type were seen. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2514
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002515static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2516{
2517 /* Calculate count of blocks used by this packet */
2518 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2519}
2520
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002521static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002522{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002523 if (!test_bit(HCI_RAW, &hdev->flags)) {
2524 /* ACL tx timeout must be longer than maximum
2525 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002526 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenkocc48dc02012-01-04 16:42:26 +02002527 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002528 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002529 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002530}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002531
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002532static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2533{
2534 unsigned int cnt = hdev->acl_cnt;
2535 struct hci_chan *chan;
2536 struct sk_buff *skb;
2537 int quote;
2538
2539 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002540
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002541 while (hdev->acl_cnt &&
2542 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002543 u32 priority = (skb_peek(&chan->data_q))->priority;
2544 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002545 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2546 skb->len, skb->priority);
2547
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002548 /* Stop if priority has changed */
2549 if (skb->priority < priority)
2550 break;
2551
2552 skb = skb_dequeue(&chan->data_q);
2553
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002554 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002555 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002556
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557 hci_send_frame(skb);
2558 hdev->acl_last_tx = jiffies;
2559
2560 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002561 chan->sent++;
2562 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563 }
2564 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002565
2566 if (cnt != hdev->acl_cnt)
2567 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002568}
2569
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002570static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2571{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002572 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002573 struct hci_chan *chan;
2574 struct sk_buff *skb;
2575 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002576
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002577 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002578
2579 while (hdev->block_cnt > 0 &&
2580 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2581 u32 priority = (skb_peek(&chan->data_q))->priority;
2582 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2583 int blocks;
2584
2585 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2586 skb->len, skb->priority);
2587
2588 /* Stop if priority has changed */
2589 if (skb->priority < priority)
2590 break;
2591
2592 skb = skb_dequeue(&chan->data_q);
2593
2594 blocks = __get_blocks(hdev, skb);
2595 if (blocks > hdev->block_cnt)
2596 return;
2597
2598 hci_conn_enter_active_mode(chan->conn,
2599 bt_cb(skb)->force_active);
2600
2601 hci_send_frame(skb);
2602 hdev->acl_last_tx = jiffies;
2603
2604 hdev->block_cnt -= blocks;
2605 quote -= blocks;
2606
2607 chan->sent += blocks;
2608 chan->conn->sent += blocks;
2609 }
2610 }
2611
2612 if (cnt != hdev->block_cnt)
2613 hci_prio_recalculate(hdev, ACL_LINK);
2614}
2615
2616static inline void hci_sched_acl(struct hci_dev *hdev)
2617{
2618 BT_DBG("%s", hdev->name);
2619
2620 if (!hci_conn_num(hdev, ACL_LINK))
2621 return;
2622
2623 switch (hdev->flow_ctl_mode) {
2624 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2625 hci_sched_acl_pkt(hdev);
2626 break;
2627
2628 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2629 hci_sched_acl_blk(hdev);
2630 break;
2631 }
2632}
2633
Linus Torvalds1da177e2005-04-16 15:20:36 -07002634/* Schedule SCO */
2635static inline void hci_sched_sco(struct hci_dev *hdev)
2636{
2637 struct hci_conn *conn;
2638 struct sk_buff *skb;
2639 int quote;
2640
2641 BT_DBG("%s", hdev->name);
2642
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002643 if (!hci_conn_num(hdev, SCO_LINK))
2644 return;
2645
Linus Torvalds1da177e2005-04-16 15:20:36 -07002646 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2647 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2648 BT_DBG("skb %p len %d", skb, skb->len);
2649 hci_send_frame(skb);
2650
2651 conn->sent++;
2652 if (conn->sent == ~0)
2653 conn->sent = 0;
2654 }
2655 }
2656}
2657
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002658static inline void hci_sched_esco(struct hci_dev *hdev)
2659{
2660 struct hci_conn *conn;
2661 struct sk_buff *skb;
2662 int quote;
2663
2664 BT_DBG("%s", hdev->name);
2665
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002666 if (!hci_conn_num(hdev, ESCO_LINK))
2667 return;
2668
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002669 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2670 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2671 BT_DBG("skb %p len %d", skb, skb->len);
2672 hci_send_frame(skb);
2673
2674 conn->sent++;
2675 if (conn->sent == ~0)
2676 conn->sent = 0;
2677 }
2678 }
2679}
2680
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002681static inline void hci_sched_le(struct hci_dev *hdev)
2682{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002683 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002684 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002685 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002686
2687 BT_DBG("%s", hdev->name);
2688
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002689 if (!hci_conn_num(hdev, LE_LINK))
2690 return;
2691
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002692 if (!test_bit(HCI_RAW, &hdev->flags)) {
2693 /* LE tx timeout must be longer than maximum
2694 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03002695 if (!hdev->le_cnt && hdev->le_pkts &&
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002696 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002697 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002698 }
2699
2700 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002701 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002702 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002703 u32 priority = (skb_peek(&chan->data_q))->priority;
2704 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002705 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2706 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002707
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002708 /* Stop if priority has changed */
2709 if (skb->priority < priority)
2710 break;
2711
2712 skb = skb_dequeue(&chan->data_q);
2713
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002714 hci_send_frame(skb);
2715 hdev->le_last_tx = jiffies;
2716
2717 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002718 chan->sent++;
2719 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002720 }
2721 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002722
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002723 if (hdev->le_pkts)
2724 hdev->le_cnt = cnt;
2725 else
2726 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002727
2728 if (cnt != tmp)
2729 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002730}
2731
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002732static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002734 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 struct sk_buff *skb;
2736
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002737 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2738 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739
2740 /* Schedule queues and send stuff to HCI driver */
2741
2742 hci_sched_acl(hdev);
2743
2744 hci_sched_sco(hdev);
2745
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002746 hci_sched_esco(hdev);
2747
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002748 hci_sched_le(hdev);
2749
Linus Torvalds1da177e2005-04-16 15:20:36 -07002750 /* Send next queued raw (unknown type) packet */
2751 while ((skb = skb_dequeue(&hdev->raw_q)))
2752 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753}
2754
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002755/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756
2757/* ACL data packet */
2758static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2759{
2760 struct hci_acl_hdr *hdr = (void *) skb->data;
2761 struct hci_conn *conn;
2762 __u16 handle, flags;
2763
2764 skb_pull(skb, HCI_ACL_HDR_SIZE);
2765
2766 handle = __le16_to_cpu(hdr->handle);
2767 flags = hci_flags(handle);
2768 handle = hci_handle(handle);
2769
2770 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2771
2772 hdev->stat.acl_rx++;
2773
2774 hci_dev_lock(hdev);
2775 conn = hci_conn_hash_lookup_handle(hdev, handle);
2776 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002777
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002779 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002780
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002782 l2cap_recv_acldata(conn, skb, flags);
2783 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002785 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786 hdev->name, handle);
2787 }
2788
2789 kfree_skb(skb);
2790}
2791
2792/* SCO data packet */
2793static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2794{
2795 struct hci_sco_hdr *hdr = (void *) skb->data;
2796 struct hci_conn *conn;
2797 __u16 handle;
2798
2799 skb_pull(skb, HCI_SCO_HDR_SIZE);
2800
2801 handle = __le16_to_cpu(hdr->handle);
2802
2803 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2804
2805 hdev->stat.sco_rx++;
2806
2807 hci_dev_lock(hdev);
2808 conn = hci_conn_hash_lookup_handle(hdev, handle);
2809 hci_dev_unlock(hdev);
2810
2811 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002813 sco_recv_scodata(conn, skb);
2814 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002816 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 hdev->name, handle);
2818 }
2819
2820 kfree_skb(skb);
2821}
2822
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002823static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002825 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 struct sk_buff *skb;
2827
2828 BT_DBG("%s", hdev->name);
2829
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002831 /* Send copy to monitor */
2832 hci_send_to_monitor(hdev, skb);
2833
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834 if (atomic_read(&hdev->promisc)) {
2835 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002836 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 }
2838
2839 if (test_bit(HCI_RAW, &hdev->flags)) {
2840 kfree_skb(skb);
2841 continue;
2842 }
2843
2844 if (test_bit(HCI_INIT, &hdev->flags)) {
2845 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002846 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 case HCI_ACLDATA_PKT:
2848 case HCI_SCODATA_PKT:
2849 kfree_skb(skb);
2850 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002851 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852 }
2853
2854 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002855 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002857 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002858 hci_event_packet(hdev, skb);
2859 break;
2860
2861 case HCI_ACLDATA_PKT:
2862 BT_DBG("%s ACL data packet", hdev->name);
2863 hci_acldata_packet(hdev, skb);
2864 break;
2865
2866 case HCI_SCODATA_PKT:
2867 BT_DBG("%s SCO data packet", hdev->name);
2868 hci_scodata_packet(hdev, skb);
2869 break;
2870
2871 default:
2872 kfree_skb(skb);
2873 break;
2874 }
2875 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876}
2877
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002878static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002880 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002881 struct sk_buff *skb;
2882
2883 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2884
Linus Torvalds1da177e2005-04-16 15:20:36 -07002885 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002886 if (atomic_read(&hdev->cmd_cnt)) {
2887 skb = skb_dequeue(&hdev->cmd_q);
2888 if (!skb)
2889 return;
2890
Wei Yongjun7585b972009-02-25 18:29:52 +08002891 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002892
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002893 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2894 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002895 atomic_dec(&hdev->cmd_cnt);
2896 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002897 if (test_bit(HCI_RESET, &hdev->flags))
2898 del_timer(&hdev->cmd_timer);
2899 else
2900 mod_timer(&hdev->cmd_timer,
Ville Tervo6bd32322011-02-16 16:32:41 +02002901 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002902 } else {
2903 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002904 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905 }
2906 }
2907}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002908
2909int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2910{
2911 /* General inquiry access code (GIAC) */
2912 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2913 struct hci_cp_inquiry cp;
2914
2915 BT_DBG("%s", hdev->name);
2916
2917 if (test_bit(HCI_INQUIRY, &hdev->flags))
2918 return -EINPROGRESS;
2919
Johan Hedberg46632622012-01-02 16:06:08 +02002920 inquiry_cache_flush(hdev);
2921
Andre Guedes2519a1f2011-11-07 11:45:24 -03002922 memset(&cp, 0, sizeof(cp));
2923 memcpy(&cp.lap, lap, sizeof(cp.lap));
2924 cp.length = length;
2925
2926 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2927}
Andre Guedes023d50492011-11-04 14:16:52 -03002928
2929int hci_cancel_inquiry(struct hci_dev *hdev)
2930{
2931 BT_DBG("%s", hdev->name);
2932
2933 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2934 return -EPERM;
2935
2936 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2937}