/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
/* Auto power-off delay for unconfigured controllers
 * (presumably milliseconds, converted with msecs_to_jiffies at the
 * point of use — confirm against the timer setup code). */
#define AUTO_OFF_TIMEOUT 2000

/* Work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Forward a device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
Johan Hedberg23bb5762010-12-21 23:01:27 +020077void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070078{
Johan Hedberg23bb5762010-12-21 23:01:27 +020079 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
80
Johan Hedberga5040ef2011-01-10 13:28:59 +020081 /* If this is the init phase check if the completed command matches
82 * the last init command, and if not just return.
83 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020084 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
85 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
Andrei Emeltchenko1036b892012-03-12 15:59:33 +020086 u16 opcode = __le16_to_cpu(sent->opcode);
Johan Hedberg75fb0e32012-03-01 21:35:55 +020087 struct sk_buff *skb;
88
89 /* Some CSR based controllers generate a spontaneous
90 * reset complete event during init and any pending
91 * command will never be completed. In such a case we
92 * need to resend whatever was the last sent
93 * command.
94 */
95
Andrei Emeltchenko1036b892012-03-12 15:59:33 +020096 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
Johan Hedberg75fb0e32012-03-01 21:35:55 +020097 return;
98
99 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100 if (skb) {
101 skb_queue_head(&hdev->cmd_q, skb);
102 queue_work(hdev->workqueue, &hdev->cmd_work);
103 }
104
Johan Hedberg23bb5762010-12-21 23:01:27 +0200105 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +0200106 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700107
108 if (hdev->req_status == HCI_REQ_PEND) {
109 hdev->req_result = result;
110 hdev->req_status = HCI_REQ_DONE;
111 wake_up_interruptible(&hdev->req_wait_q);
112 }
113}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900127static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100128 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700129{
130 DECLARE_WAITQUEUE(wait, current);
131 int err = 0;
132
133 BT_DBG("%s start", hdev->name);
134
135 hdev->req_status = HCI_REQ_PEND;
136
137 add_wait_queue(&hdev->req_wait_q, &wait);
138 set_current_state(TASK_INTERRUPTIBLE);
139
140 req(hdev, opt);
141 schedule_timeout(timeout);
142
143 remove_wait_queue(&hdev->req_wait_q, &wait);
144
145 if (signal_pending(current))
146 return -EINTR;
147
148 switch (hdev->req_status) {
149 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700150 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700151 break;
152
153 case HCI_REQ_CANCELED:
154 err = -hdev->req_result;
155 break;
156
157 default:
158 err = -ETIMEDOUT;
159 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700160 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700161
Johan Hedberga5040ef2011-01-10 13:28:59 +0200162 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700163
164 BT_DBG("%s end: err %d", hdev->name, err);
165
166 return err;
167}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100170 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171{
172 int ret;
173
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
185static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186{
187 BT_DBG("%s %ld", hdev->name, opt);
188
189 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300190 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200191 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700192}
193
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200194static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200196 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800197 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200198 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200200 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
Linus Torvalds1da177e2005-04-16 15:20:36 -0700202 /* Mandatory initialization */
203
204 /* Reset */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300205 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200206 set_bit(HCI_RESET, &hdev->flags);
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300208 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200211 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700212
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200213 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200214 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200215
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200217 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700218
Linus Torvalds1da177e2005-04-16 15:20:36 -0700219 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200220 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221
222 /* Read Class of Device */
223 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224
225 /* Read Local Name */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700227
228 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200229 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700230
231 /* Optional initialization */
232
233 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200234 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200235 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700236
Linus Torvalds1da177e2005-04-16 15:20:36 -0700237 /* Connection accept timeout ~20 secs */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -0700238 param = cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200239 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200240
241 bacpy(&cp.bdaddr, BDADDR_ANY);
242 cp.delete_all = 1;
243 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700244}
245
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200246static void amp_init(struct hci_dev *hdev)
247{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200248 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200250 /* Reset */
251 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253 /* Read Local Version */
254 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300255
256 /* Read Local AMP Info */
257 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200258}
259
260static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
261{
262 struct sk_buff *skb;
263
264 BT_DBG("%s %ld", hdev->name, opt);
265
266 /* Driver initialization */
267
268 /* Special commands */
269 while ((skb = skb_dequeue(&hdev->driver_init))) {
270 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
271 skb->dev = (void *) hdev;
272
273 skb_queue_tail(&hdev->cmd_q, skb);
274 queue_work(hdev->workqueue, &hdev->cmd_work);
275 }
276 skb_queue_purge(&hdev->driver_init);
277
278 switch (hdev->dev_type) {
279 case HCI_BREDR:
280 bredr_init(hdev);
281 break;
282
283 case HCI_AMP:
284 amp_init(hdev);
285 break;
286
287 default:
288 BT_ERR("Unknown device type %d", hdev->dev_type);
289 break;
290 }
291
292}
293
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300294static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
295{
296 BT_DBG("%s", hdev->name);
297
298 /* Read LE buffer size */
299 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
300}
301
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
303{
304 __u8 scan = opt;
305
306 BT_DBG("%s %x", hdev->name, scan);
307
308 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200309 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310}
311
312static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
313{
314 __u8 auth = opt;
315
316 BT_DBG("%s %x", hdev->name, auth);
317
318 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200319 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320}
321
322static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
323{
324 __u8 encrypt = opt;
325
326 BT_DBG("%s %x", hdev->name, encrypt);
327
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200328 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200329 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330}
331
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200332static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
333{
334 __le16 policy = cpu_to_le16(opt);
335
Marcel Holtmanna418b892008-11-30 12:17:28 +0100336 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200337
338 /* Default link policy */
339 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
340}
341
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900342/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343 * Device is held on return. */
344struct hci_dev *hci_dev_get(int index)
345{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200346 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347
348 BT_DBG("%d", index);
349
350 if (index < 0)
351 return NULL;
352
353 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200354 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355 if (d->id == index) {
356 hdev = hci_dev_hold(d);
357 break;
358 }
359 }
360 read_unlock(&hci_dev_list_lock);
361 return hdev;
362}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363
364/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200365
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200366bool hci_discovery_active(struct hci_dev *hdev)
367{
368 struct discovery_state *discov = &hdev->discovery;
369
Andre Guedes6fbe1952012-02-03 17:47:58 -0300370 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300371 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300372 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200373 return true;
374
Andre Guedes6fbe1952012-02-03 17:47:58 -0300375 default:
376 return false;
377 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200378}
379
Johan Hedbergff9ef572012-01-04 14:23:45 +0200380void hci_discovery_set_state(struct hci_dev *hdev, int state)
381{
382 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
383
384 if (hdev->discovery.state == state)
385 return;
386
387 switch (state) {
388 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300389 if (hdev->discovery.state != DISCOVERY_STARTING)
390 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200391 break;
392 case DISCOVERY_STARTING:
393 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300394 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200395 mgmt_discovering(hdev, 1);
396 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200397 case DISCOVERY_RESOLVING:
398 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200399 case DISCOVERY_STOPPING:
400 break;
401 }
402
403 hdev->discovery.state = state;
404}
405
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406static void inquiry_cache_flush(struct hci_dev *hdev)
407{
Johan Hedberg30883512012-01-04 14:16:21 +0200408 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200409 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410
Johan Hedberg561aafb2012-01-04 13:31:59 +0200411 list_for_each_entry_safe(p, n, &cache->all, all) {
412 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200413 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200415
416 INIT_LIST_HEAD(&cache->unknown);
417 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418}
419
420struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
421{
Johan Hedberg30883512012-01-04 14:16:21 +0200422 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 struct inquiry_entry *e;
424
425 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
426
Johan Hedberg561aafb2012-01-04 13:31:59 +0200427 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200429 return e;
430 }
431
432 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433}
434
Johan Hedberg561aafb2012-01-04 13:31:59 +0200435struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300436 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200437{
Johan Hedberg30883512012-01-04 14:16:21 +0200438 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200439 struct inquiry_entry *e;
440
441 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
442
443 list_for_each_entry(e, &cache->unknown, list) {
444 if (!bacmp(&e->data.bdaddr, bdaddr))
445 return e;
446 }
447
448 return NULL;
449}
450
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200451struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300452 bdaddr_t *bdaddr,
453 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200454{
455 struct discovery_state *cache = &hdev->discovery;
456 struct inquiry_entry *e;
457
458 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
459
460 list_for_each_entry(e, &cache->resolve, list) {
461 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
462 return e;
463 if (!bacmp(&e->data.bdaddr, bdaddr))
464 return e;
465 }
466
467 return NULL;
468}
469
Johan Hedberga3d4e202012-01-09 00:53:02 +0200470void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300471 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200472{
473 struct discovery_state *cache = &hdev->discovery;
474 struct list_head *pos = &cache->resolve;
475 struct inquiry_entry *p;
476
477 list_del(&ie->list);
478
479 list_for_each_entry(p, &cache->resolve, list) {
480 if (p->name_state != NAME_PENDING &&
481 abs(p->data.rssi) >= abs(ie->data.rssi))
482 break;
483 pos = &p->list;
484 }
485
486 list_add(&ie->list, pos);
487}
488
Johan Hedberg31754052012-01-04 13:39:52 +0200489bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300490 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700491{
Johan Hedberg30883512012-01-04 14:16:21 +0200492 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200493 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700494
495 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
496
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200497 if (ssp)
498 *ssp = data->ssp_mode;
499
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200500 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200501 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200502 if (ie->data.ssp_mode && ssp)
503 *ssp = true;
504
Johan Hedberga3d4e202012-01-09 00:53:02 +0200505 if (ie->name_state == NAME_NEEDED &&
506 data->rssi != ie->data.rssi) {
507 ie->data.rssi = data->rssi;
508 hci_inquiry_cache_update_resolve(hdev, ie);
509 }
510
Johan Hedberg561aafb2012-01-04 13:31:59 +0200511 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200512 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200513
Johan Hedberg561aafb2012-01-04 13:31:59 +0200514 /* Entry not in the cache. Add new one. */
515 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
516 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200517 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200518
519 list_add(&ie->all, &cache->all);
520
521 if (name_known) {
522 ie->name_state = NAME_KNOWN;
523 } else {
524 ie->name_state = NAME_NOT_KNOWN;
525 list_add(&ie->list, &cache->unknown);
526 }
527
528update:
529 if (name_known && ie->name_state != NAME_KNOWN &&
530 ie->name_state != NAME_PENDING) {
531 ie->name_state = NAME_KNOWN;
532 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700533 }
534
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200535 memcpy(&ie->data, data, sizeof(*data));
536 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700537 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200538
539 if (ie->name_state == NAME_NOT_KNOWN)
540 return false;
541
542 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700543}
544
545static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
546{
Johan Hedberg30883512012-01-04 14:16:21 +0200547 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 struct inquiry_info *info = (struct inquiry_info *) buf;
549 struct inquiry_entry *e;
550 int copied = 0;
551
Johan Hedberg561aafb2012-01-04 13:31:59 +0200552 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200554
555 if (copied >= num)
556 break;
557
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558 bacpy(&info->bdaddr, &data->bdaddr);
559 info->pscan_rep_mode = data->pscan_rep_mode;
560 info->pscan_period_mode = data->pscan_period_mode;
561 info->pscan_mode = data->pscan_mode;
562 memcpy(info->dev_class, data->dev_class, 3);
563 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200564
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200566 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567 }
568
569 BT_DBG("cache %p, copied %d", cache, copied);
570 return copied;
571}
572
573static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
574{
575 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
576 struct hci_cp_inquiry cp;
577
578 BT_DBG("%s", hdev->name);
579
580 if (test_bit(HCI_INQUIRY, &hdev->flags))
581 return;
582
583 /* Start Inquiry */
584 memcpy(&cp.lap, &ir->lap, 3);
585 cp.length = ir->length;
586 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200587 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700588}
589
590int hci_inquiry(void __user *arg)
591{
592 __u8 __user *ptr = arg;
593 struct hci_inquiry_req ir;
594 struct hci_dev *hdev;
595 int err = 0, do_inquiry = 0, max_rsp;
596 long timeo;
597 __u8 *buf;
598
599 if (copy_from_user(&ir, ptr, sizeof(ir)))
600 return -EFAULT;
601
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200602 hdev = hci_dev_get(ir.dev_id);
603 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604 return -ENODEV;
605
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300606 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900607 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200608 inquiry_cache_empty(hdev) ||
609 ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700610 inquiry_cache_flush(hdev);
611 do_inquiry = 1;
612 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300613 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700614
Marcel Holtmann04837f62006-07-03 10:02:33 +0200615 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200616
617 if (do_inquiry) {
618 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
619 if (err < 0)
620 goto done;
621 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622
623 /* for unlimited number of responses we will use buffer with 255 entries */
624 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
625
626 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
627 * copy it to the user space.
628 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100629 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200630 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631 err = -ENOMEM;
632 goto done;
633 }
634
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300635 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300637 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700638
639 BT_DBG("num_rsp %d", ir.num_rsp);
640
641 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
642 ptr += sizeof(ir);
643 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
644 ir.num_rsp))
645 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900646 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700647 err = -EFAULT;
648
649 kfree(buf);
650
651done:
652 hci_dev_put(hdev);
653 return err;
654}
655
656/* ---- HCI ioctl helpers ---- */
657
658int hci_dev_open(__u16 dev)
659{
660 struct hci_dev *hdev;
661 int ret = 0;
662
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200663 hdev = hci_dev_get(dev);
664 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 return -ENODEV;
666
667 BT_DBG("%s %p", hdev->name, hdev);
668
669 hci_req_lock(hdev);
670
Johan Hovold94324962012-03-15 14:48:41 +0100671 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
672 ret = -ENODEV;
673 goto done;
674 }
675
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200676 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
677 ret = -ERFKILL;
678 goto done;
679 }
680
Linus Torvalds1da177e2005-04-16 15:20:36 -0700681 if (test_bit(HCI_UP, &hdev->flags)) {
682 ret = -EALREADY;
683 goto done;
684 }
685
686 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
687 set_bit(HCI_RAW, &hdev->flags);
688
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200689 /* Treat all non BR/EDR controllers as raw devices if
690 enable_hs is not set */
691 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100692 set_bit(HCI_RAW, &hdev->flags);
693
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694 if (hdev->open(hdev)) {
695 ret = -EIO;
696 goto done;
697 }
698
699 if (!test_bit(HCI_RAW, &hdev->flags)) {
700 atomic_set(&hdev->cmd_cnt, 1);
701 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200702 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700703
Marcel Holtmann04837f62006-07-03 10:02:33 +0200704 ret = __hci_request(hdev, hci_init_req, 0,
705 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700706
Andre Guedeseead27d2011-06-30 19:20:55 -0300707 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300708 ret = __hci_request(hdev, hci_le_init_req, 0,
709 msecs_to_jiffies(HCI_INIT_TIMEOUT));
710
Linus Torvalds1da177e2005-04-16 15:20:36 -0700711 clear_bit(HCI_INIT, &hdev->flags);
712 }
713
714 if (!ret) {
715 hci_dev_hold(hdev);
716 set_bit(HCI_UP, &hdev->flags);
717 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200718 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300719 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200720 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300721 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200722 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900723 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200725 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200726 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400727 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700728
729 skb_queue_purge(&hdev->cmd_q);
730 skb_queue_purge(&hdev->rx_q);
731
732 if (hdev->flush)
733 hdev->flush(hdev);
734
735 if (hdev->sent_cmd) {
736 kfree_skb(hdev->sent_cmd);
737 hdev->sent_cmd = NULL;
738 }
739
740 hdev->close(hdev);
741 hdev->flags = 0;
742 }
743
744done:
745 hci_req_unlock(hdev);
746 hci_dev_put(hdev);
747 return ret;
748}
749
750static int hci_dev_do_close(struct hci_dev *hdev)
751{
752 BT_DBG("%s %p", hdev->name, hdev);
753
Andre Guedes28b75a82012-02-03 17:48:00 -0300754 cancel_work_sync(&hdev->le_scan);
755
Linus Torvalds1da177e2005-04-16 15:20:36 -0700756 hci_req_cancel(hdev, ENODEV);
757 hci_req_lock(hdev);
758
759 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300760 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700761 hci_req_unlock(hdev);
762 return 0;
763 }
764
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200765 /* Flush RX and TX works */
766 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400767 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700768
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200769 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200770 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200771 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +0200772 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200773 }
774
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200775 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200776 cancel_delayed_work(&hdev->service_cache);
777
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300778 cancel_delayed_work_sync(&hdev->le_scan_disable);
779
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300780 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700781 inquiry_cache_flush(hdev);
782 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300783 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700784
785 hci_notify(hdev, HCI_DEV_DOWN);
786
787 if (hdev->flush)
788 hdev->flush(hdev);
789
790 /* Reset device */
791 skb_queue_purge(&hdev->cmd_q);
792 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200793 if (!test_bit(HCI_RAW, &hdev->flags) &&
794 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200796 __hci_request(hdev, hci_reset_req, 0,
Gustavo F. Padovancad44c22011-12-23 18:59:13 -0200797 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700798 clear_bit(HCI_INIT, &hdev->flags);
799 }
800
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200801 /* flush cmd work */
802 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803
804 /* Drop queues */
805 skb_queue_purge(&hdev->rx_q);
806 skb_queue_purge(&hdev->cmd_q);
807 skb_queue_purge(&hdev->raw_q);
808
809 /* Drop last sent command */
810 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300811 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700812 kfree_skb(hdev->sent_cmd);
813 hdev->sent_cmd = NULL;
814 }
815
816 /* After this point our queues are empty
817 * and no tasks are scheduled. */
818 hdev->close(hdev);
819
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100820 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
821 hci_dev_lock(hdev);
822 mgmt_powered(hdev, 0);
823 hci_dev_unlock(hdev);
824 }
Johan Hedberg5add6af2010-12-16 10:00:37 +0200825
Linus Torvalds1da177e2005-04-16 15:20:36 -0700826 /* Clear flags */
827 hdev->flags = 0;
828
Johan Hedberge59fda82012-02-22 18:11:53 +0200829 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +0200830 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +0200831
Linus Torvalds1da177e2005-04-16 15:20:36 -0700832 hci_req_unlock(hdev);
833
834 hci_dev_put(hdev);
835 return 0;
836}
837
/* HCIDEVDOWN ioctl backend: close an HCI device by index.
 *
 * Takes a reference on the device, cancels a pending auto-power-off
 * work item if HCI_AUTO_OFF was set (user space asked for an explicit
 * close, so the delayed shutdown must not race with it), then performs
 * the actual teardown via hci_dev_do_close().
 *
 * Returns 0 on success, -ENODEV for an unknown index, or the error
 * from hci_dev_do_close().
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Clear HCI_AUTO_OFF before closing so hci_dev_do_close() treats
	 * this as a user-requested power-down. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
855
/* HCIDEVRESET ioctl backend: soft-reset a running HCI device.
 *
 * Purges queued RX and command traffic, flushes the inquiry cache and
 * all active connections, resets the command/packet flow-control
 * counters and, unless the device is in raw mode, sends an HCI Reset
 * to the controller.
 *
 * Returns 0 on success (a no-op if the device is not up), -ENODEV for
 * an unknown index, or the __hci_request() error.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other request issuers for this device */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Allow one outstanding command again and zero the per-link-type
	 * packet quotas */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
894
895int hci_dev_reset_stat(__u16 dev)
896{
897 struct hci_dev *hdev;
898 int ret = 0;
899
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200900 hdev = hci_dev_get(dev);
901 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 return -ENODEV;
903
904 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
905
906 hci_dev_put(hdev);
907
908 return ret;
909}
910
/* Handle the HCISET* device-configuration ioctls.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, HCISETSCAN, ...)
 * @arg: user pointer to a struct hci_dev_req {dev_id, dev_opt}
 *
 * Looks up the device by dr.dev_id and either issues a synchronous HCI
 * request (auth/encrypt/scan/link-policy) or updates in-kernel device
 * parameters directly. Returns 0 on success or a negative errno
 * (-EFAULT, -ENODEV, -EOPNOTSUPP, -EINVAL, or a request error).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 values: the MTU in the upper
		 * halfword and the packet count in the lower one */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same two-halfword packing as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
985
/* HCIGETDEVLIST ioctl backend: report all registered HCI devices.
 *
 * @arg points to a struct hci_dev_list_req whose first __u16 is the
 * maximum number of entries the caller can accept; on success the
 * structure is filled with (dev_id, flags) pairs and the actual count.
 *
 * Returns 0 on success, -EFAULT on user-copy failure, -EINVAL for a
 * zero/oversized request, or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to at most two pages of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* A raw/legacy user is enumerating devices: abort any
		 * pending auto power-off for this device */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Without a mgmt client the device defaults to pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1032
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info for one
 * device.
 *
 * @arg points to a struct hci_dev_info whose dev_id selects the
 * device; the remaining fields (address, type, flags, MTUs, link
 * policy/mode, stats, features) are filled in and copied back.
 *
 * Returns 0 on success, -EFAULT on user-copy failure or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Raw/legacy user querying the device: cancel a pending auto
	 * power-off (synchronously, so it cannot run concurrently) */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Without a mgmt client the device defaults to pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1074
1075/* ---- Interface to HCI drivers ---- */
1076
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001077static int hci_rfkill_set_block(void *data, bool blocked)
1078{
1079 struct hci_dev *hdev = data;
1080
1081 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1082
1083 if (!blocked)
1084 return 0;
1085
1086 hci_dev_do_close(hdev);
1087
1088 return 0;
1089}
1090
1091static const struct rfkill_ops hci_rfkill_ops = {
1092 .set_block = hci_rfkill_set_block,
1093};
1094
/* Allocate a new HCI device structure.
 *
 * The returned hdev is zero-initialised, has its sysfs representation
 * prepared via hci_init_sysfs() and its driver_init queue set up. The
 * transport driver fills in the remaining fields before registering
 * the device. Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1110
/* Release an HCI device allocated with hci_alloc_dev().
 *
 * Purges any driver-init skbs still queued and drops the embedded
 * struct device reference; the actual memory is freed by the device
 * core's release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1120
/* Deferred power-on work (hdev->power_on).
 *
 * Brings the device up; if that fails the work simply returns. When
 * the device was powered on automatically (HCI_AUTO_OFF set) an
 * auto-off timer is scheduled, and if this completes the initial setup
 * phase (HCI_SETUP) the new index is announced over the mgmt
 * interface.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-powered devices are turned back off after a grace period
	 * unless something claims them in the meantime */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1137
1138static void hci_power_off(struct work_struct *work)
1139{
Johan Hedberg32435532011-11-07 22:16:04 +02001140 struct hci_dev *hdev = container_of(work, struct hci_dev,
1141 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001142
1143 BT_DBG("%s", hdev->name);
1144
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001145 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001146}
1147
/* Delayed work ending a time-limited discoverable period
 * (hdev->discov_off).
 *
 * Writes Scan_Enable = page-scan-only to the controller, turning off
 * inquiry scan, and clears the stored discoverable timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1165
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001166int hci_uuids_clear(struct hci_dev *hdev)
1167{
1168 struct list_head *p, *n;
1169
1170 list_for_each_safe(p, n, &hdev->uuids) {
1171 struct bt_uuid *uuid;
1172
1173 uuid = list_entry(p, struct bt_uuid, list);
1174
1175 list_del(p);
1176 kfree(uuid);
1177 }
1178
1179 return 0;
1180}
1181
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001182int hci_link_keys_clear(struct hci_dev *hdev)
1183{
1184 struct list_head *p, *n;
1185
1186 list_for_each_safe(p, n, &hdev->link_keys) {
1187 struct link_key *key;
1188
1189 key = list_entry(p, struct link_key, list);
1190
1191 list_del(p);
1192 kfree(key);
1193 }
1194
1195 return 0;
1196}
1197
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001198int hci_smp_ltks_clear(struct hci_dev *hdev)
1199{
1200 struct smp_ltk *k, *tmp;
1201
1202 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1203 list_del(&k->list);
1204 kfree(k);
1205 }
1206
1207 return 0;
1208}
1209
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001210struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1211{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001212 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001213
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001214 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001215 if (bacmp(bdaddr, &k->bdaddr) == 0)
1216 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001217
1218 return NULL;
1219}
1220
/* Decide whether a newly created link key should be stored
 * persistently by user space.
 *
 * @conn may be NULL (security mode 3: key created without a
 * connection context). @old_key_type is 0xff when no previous key
 * existed. Returns true if the key should survive, false if it must
 * be treated as temporary. The checks below are ordered from
 * strongest reject/accept rules to weakest; do not reorder.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1256
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001257struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001258{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001259 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001260
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001261 list_for_each_entry(k, &hdev->long_term_keys, list) {
1262 if (k->ediv != ediv ||
1263 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001264 continue;
1265
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001266 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001267 }
1268
1269 return NULL;
1270}
1271EXPORT_SYMBOL(hci_find_ltk);
1272
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001273struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001274 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001275{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001276 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001277
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001278 list_for_each_entry(k, &hdev->long_term_keys, list)
1279 if (addr_type == k->bdaddr_type &&
1280 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001281 return k;
1282
1283 return NULL;
1284}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001285EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001286
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL. @new_key is non-zero when the key arrived via an
 * HCI Link Key Notification (as opposed to being loaded from storage);
 * only then is user space notified and the connection's flush_key
 * policy updated. Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
			bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
			(!conn || conn->remote_auth == 0xff) &&
			old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1340
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type.
 *
 * Keys whose @type is neither HCI_SMP_STK nor HCI_SMP_LTK are silently
 * ignored. When @new_key is non-zero and the key is an LTK, user space
 * is notified via mgmt_new_ltk(). Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are short-lived, so only LTKs are reported to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1377
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001378int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1379{
1380 struct link_key *key;
1381
1382 key = hci_find_link_key(hdev, bdaddr);
1383 if (!key)
1384 return -ENOENT;
1385
1386 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1387
1388 list_del(&key->list);
1389 kfree(key);
1390
1391 return 0;
1392}
1393
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001394int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1395{
1396 struct smp_ltk *k, *tmp;
1397
1398 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1399 if (bacmp(bdaddr, &k->bdaddr))
1400 continue;
1401
1402 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1403
1404 list_del(&k->list);
1405 kfree(k);
1406 }
1407
1408 return 0;
1409}
1410
/* HCI command timer function.
 *
 * Fires when the controller failed to answer the outstanding command
 * within the timeout. Restores the command credit to 1 and kicks the
 * command work so queued commands can proceed.
 */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1420
Szymon Janc2763eda2011-03-22 13:12:22 +01001421struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001422 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001423{
1424 struct oob_data *data;
1425
1426 list_for_each_entry(data, &hdev->remote_oob_data, list)
1427 if (bacmp(bdaddr, &data->bdaddr) == 0)
1428 return data;
1429
1430 return NULL;
1431}
1432
1433int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1434{
1435 struct oob_data *data;
1436
1437 data = hci_find_remote_oob_data(hdev, bdaddr);
1438 if (!data)
1439 return -ENOENT;
1440
1441 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1442
1443 list_del(&data->list);
1444 kfree(data);
1445
1446 return 0;
1447}
1448
1449int hci_remote_oob_data_clear(struct hci_dev *hdev)
1450{
1451 struct oob_data *data, *n;
1452
1453 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1454 list_del(&data->list);
1455 kfree(data);
1456 }
1457
1458 return 0;
1459}
1460
/* Cache remote out-of-band pairing data (hash + randomizer) for
 * @bdaddr, reusing an existing entry if one is present.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1484
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001485struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001486{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001487 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001488
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001489 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001490 if (bacmp(bdaddr, &b->bdaddr) == 0)
1491 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001492
1493 return NULL;
1494}
1495
1496int hci_blacklist_clear(struct hci_dev *hdev)
1497{
1498 struct list_head *p, *n;
1499
1500 list_for_each_safe(p, n, &hdev->blacklist) {
1501 struct bdaddr_list *b;
1502
1503 b = list_entry(p, struct bdaddr_list, list);
1504
1505 list_del(p);
1506 kfree(b);
1507 }
1508
1509 return 0;
1510}
1511
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001512int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001513{
1514 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001515
1516 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1517 return -EBADF;
1518
Antti Julku5e762442011-08-25 16:48:02 +03001519 if (hci_blacklist_lookup(hdev, bdaddr))
1520 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001521
1522 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001523 if (!entry)
1524 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001525
1526 bacpy(&entry->bdaddr, bdaddr);
1527
1528 list_add(&entry->list, &hdev->blacklist);
1529
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001530 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001531}
1532
/* Remove @bdaddr from the device blacklist and notify mgmt.
 *
 * Passing BDADDR_ANY clears the entire blacklist instead. Returns the
 * mgmt_device_unblocked() result on success or -ENOENT if the address
 * was not blacklisted.
 */
int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	/* Wildcard address: wipe the whole list */
	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr, type);
}
1549
/* Delayed work (hdev->adv_work) that empties the LE advertising cache
 * under the device lock. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1561
Andre Guedes76c86862011-05-26 16:23:50 -03001562int hci_adv_entries_clear(struct hci_dev *hdev)
1563{
1564 struct adv_entry *entry, *tmp;
1565
1566 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1567 list_del(&entry->list);
1568 kfree(entry);
1569 }
1570
1571 BT_DBG("%s adv cache cleared", hdev->name);
1572
1573 return 0;
1574}
1575
1576struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1577{
1578 struct adv_entry *entry;
1579
1580 list_for_each_entry(entry, &hdev->adv_entries, list)
1581 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1582 return entry;
1583
1584 return NULL;
1585}
1586
1587static inline int is_connectable_adv(u8 evt_type)
1588{
1589 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1590 return 1;
1591
1592 return 0;
1593}
1594
1595int hci_add_adv_entry(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001596 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
Andre Guedes76c86862011-05-26 16:23:50 -03001597 return -EINVAL;
1598
1599 /* Only new entries should be added to adv_entries. So, if
1600 * bdaddr was found, don't add it. */
1601 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1602 return 0;
1603
Andre Guedes4777bfd2012-01-30 23:31:28 -03001604 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Andre Guedes76c86862011-05-26 16:23:50 -03001605 if (!entry)
1606 return -ENOMEM;
1607
1608 bacpy(&entry->bdaddr, &ev->bdaddr);
1609 entry->bdaddr_type = ev->bdaddr_type;
1610
1611 list_add(&entry->list, &hdev->adv_entries);
1612
1613 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1614 batostr(&entry->bdaddr), entry->bdaddr_type);
1615
1616 return 0;
1617}
1618
/* __hci_request() callback: send LE Set Scan Parameters.
 *
 * @opt carries a struct le_scan_params pointer (type, interval,
 * window) prepared by hci_do_le_scan().
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1631
/* __hci_request() callback: send LE Set Scan Enable with enable = 1
 * (all other parameters zeroed). */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1641
/* Start an LE scan with the given parameters and schedule its end.
 *
 * Issues LE Set Scan Parameters followed by LE Set Scan Enable (each
 * with a 3 second command timeout), then queues le_scan_disable work
 * to stop the scan after @timeout milliseconds.
 *
 * Returns 0 on success, -EINPROGRESS if a scan is already running, or
 * the __hci_request() error.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
				u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* param lives on this stack frame; both requests complete
	 * synchronously under hci_req_lock */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
				timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scan is running; arrange for it to be turned off again */
	schedule_delayed_work(&hdev->le_scan_disable,
				msecs_to_jiffies(timeout));

	return 0;
}
1675
Andre Guedes7dbfac12012-03-15 16:52:07 -03001676int hci_cancel_le_scan(struct hci_dev *hdev)
1677{
1678 BT_DBG("%s", hdev->name);
1679
1680 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1681 return -EALREADY;
1682
1683 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1684 struct hci_cp_le_set_scan_enable cp;
1685
1686 /* Send HCI command to disable LE Scan */
1687 memset(&cp, 0, sizeof(cp));
1688 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1689 }
1690
1691 return 0;
1692}
1693
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001694static void le_scan_disable_work(struct work_struct *work)
1695{
1696 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001697 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001698 struct hci_cp_le_set_scan_enable cp;
1699
1700 BT_DBG("%s", hdev->name);
1701
1702 memset(&cp, 0, sizeof(cp));
1703
1704 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1705}
1706
Andre Guedes28b75a82012-02-03 17:48:00 -03001707static void le_scan_work(struct work_struct *work)
1708{
1709 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1710 struct le_scan_params *param = &hdev->le_scan_params;
1711
1712 BT_DBG("%s", hdev->name);
1713
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001714 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1715 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001716}
1717
1718int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001719 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001720{
1721 struct le_scan_params *param = &hdev->le_scan_params;
1722
1723 BT_DBG("%s", hdev->name);
1724
1725 if (work_busy(&hdev->le_scan))
1726 return -EINPROGRESS;
1727
1728 param->type = type;
1729 param->interval = interval;
1730 param->window = window;
1731 param->timeout = timeout;
1732
1733 queue_work(system_long_wq, &hdev->le_scan);
1734
1735 return 0;
1736}
1737
/* Register HCI device
 *
 * Allocates the lowest free device id (index 0 is reserved for BR/EDR
 * controllers), initializes all per-device state, creates the per-device
 * workqueue and sysfs/rfkill entries, and kicks off the power-on work.
 *
 * Returns the assigned id on success or a negative error; on failure
 * the device is unlinked from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must supply at least open() and close() callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after 'head' so the list stays sorted by id */
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Per-device ordered workqueue for RX/TX/command processing */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is not fatal; fall back to no rfkill */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1869
/* Unregister HCI device
 *
 * Reverses hci_register_dev(): unlinks the device, shuts it down,
 * notifies mgmt listeners, tears down rfkill/sysfs/workqueue and
 * frees all cached per-device data before dropping the registration
 * reference taken at registration time.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device so concurrent paths know it is going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if the device finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all cached device data under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1924
1925/* Suspend HCI device */
1926int hci_suspend_dev(struct hci_dev *hdev)
1927{
1928 hci_notify(hdev, HCI_DEV_SUSPEND);
1929 return 0;
1930}
1931EXPORT_SYMBOL(hci_suspend_dev);
1932
1933/* Resume HCI device */
1934int hci_resume_dev(struct hci_dev *hdev)
1935{
1936 hci_notify(hdev, HCI_DEV_RESUME);
1937 return 0;
1938}
1939EXPORT_SYMBOL(hci_resume_dev);
1940
Marcel Holtmann76bca882009-11-18 00:40:39 +01001941/* Receive frame from HCI drivers */
1942int hci_recv_frame(struct sk_buff *skb)
1943{
1944 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1945 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1946 && !test_bit(HCI_INIT, &hdev->flags))) {
1947 kfree_skb(skb);
1948 return -ENXIO;
1949 }
1950
1951 /* Incomming skb */
1952 bt_cb(skb)->incoming = 1;
1953
1954 /* Time stamp */
1955 __net_timestamp(skb);
1956
Marcel Holtmann76bca882009-11-18 00:40:39 +01001957 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001958 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001959
Marcel Holtmann76bca882009-11-18 00:40:39 +01001960 return 0;
1961}
1962EXPORT_SYMBOL(hci_recv_frame);
1963
/* Core packet reassembly helper.
 *
 * Appends up to @count bytes of @data to the partially received packet
 * stored in hdev->reassembly[@index], allocating a fresh skb when a new
 * packet starts.  Once the fixed-size header of an event/ACL/SCO packet
 * is complete, the expected payload length is read from it; each
 * completed packet is handed to hci_recv_frame().
 *
 * Returns the number of input bytes not yet consumed (>= 0), -EILSEQ
 * for a bad type/index, or -ENOMEM when allocation fails or the
 * advertised payload would not fit the skb.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the skb for the worst-case
		 * payload of this packet type */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;	/* bytes still needed to finish header */
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from its length field and sanity-check it */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2072
Marcel Holtmannef222012007-07-11 06:42:04 +02002073int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2074{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302075 int rem = 0;
2076
Marcel Holtmannef222012007-07-11 06:42:04 +02002077 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2078 return -EILSEQ;
2079
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002080 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002081 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302082 if (rem < 0)
2083 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002084
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302085 data += (count - rem);
2086 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002087 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002088
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302089 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002090}
2091EXPORT_SYMBOL(hci_recv_fragment);
2092
Suraj Sumangala99811512010-07-14 13:02:19 +05302093#define STREAM_REASSEMBLY 0
2094
2095int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2096{
2097 int type;
2098 int rem = 0;
2099
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002100 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302101 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2102
2103 if (!skb) {
2104 struct { char type; } *pkt;
2105
2106 /* Start of the frame */
2107 pkt = data;
2108 type = pkt->type;
2109
2110 data++;
2111 count--;
2112 } else
2113 type = bt_cb(skb)->pkt_type;
2114
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002115 rem = hci_reassembly(hdev, type, data, count,
2116 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302117 if (rem < 0)
2118 return rem;
2119
2120 data += (count - rem);
2121 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002122 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302123
2124 return rem;
2125}
2126EXPORT_SYMBOL(hci_recv_stream_fragment);
2127
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128/* ---- Interface to upper protocols ---- */
2129
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130int hci_register_cb(struct hci_cb *cb)
2131{
2132 BT_DBG("%p name %s", cb, cb->name);
2133
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002134 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002136 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137
2138 return 0;
2139}
2140EXPORT_SYMBOL(hci_register_cb);
2141
2142int hci_unregister_cb(struct hci_cb *cb)
2143{
2144 BT_DBG("%p name %s", cb, cb->name);
2145
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002146 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002148 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002149
2150 return 0;
2151}
2152EXPORT_SYMBOL(hci_unregister_cb);
2153
/* Push one frame to the driver.
 *
 * Time stamps the skb, mirrors it to the HCI monitor (and, in
 * promiscuous mode, to raw HCI sockets), detaches it from its socket
 * owner and hands it to the driver's send() callback.  Returns the
 * driver's result, or -ENODEV when the skb carries no device.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2181
/* Send HCI command
 *
 * Builds a command packet (command header for @opcode followed by
 * @plen bytes of @param) and queues it on hdev->cmd_q; the cmd_work
 * worker later pushes it to the driver.  Returns 0 on success or
 * -ENOMEM when the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during init so the init
	 * sequence can resume on its command-complete event */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217
2218/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002219void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220{
2221 struct hci_command_hdr *hdr;
2222
2223 if (!hdev->sent_cmd)
2224 return NULL;
2225
2226 hdr = (void *) hdev->sent_cmd->data;
2227
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002228 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229 return NULL;
2230
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002231 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232
2233 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2234}
2235
2236/* Send ACL data */
/* Prepend an ACL header to @skb, packing @handle and the packet
 * boundary/broadcast @flags into the handle field and recording the
 * current payload length as dlen (both little-endian).
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the header is added */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2248
/* Queue an ACL frame, plus any fragments hanging off its frag_list,
 * on @queue.  The head skb keeps the caller's @flags; continuation
 * fragments get ACL headers with ACL_CONT set, and the whole chain is
 * spliced in atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain from the head skb */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments are marked ACL_CONT */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2289
2290void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2291{
2292 struct hci_conn *conn = chan->conn;
2293 struct hci_dev *hdev = conn->hdev;
2294
2295 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2296
2297 skb->dev = (void *) hdev;
2298 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2299 hci_add_acl_hdr(skb, conn->handle, flags);
2300
2301 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002303 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304}
2305EXPORT_SYMBOL(hci_send_acl);
2306
2307/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002308void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309{
2310 struct hci_dev *hdev = conn->hdev;
2311 struct hci_sco_hdr hdr;
2312
2313 BT_DBG("%s len %d", hdev->name, skb->len);
2314
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002315 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002316 hdr.dlen = skb->len;
2317
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002318 skb_push(skb, HCI_SCO_HDR_SIZE);
2319 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002320 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321
2322 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002323 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002324
Linus Torvalds1da177e2005-04-16 15:20:36 -07002325 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002326 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327}
2328EXPORT_SYMBOL(hci_send_sco);
2329
2330/* ---- HCI TX task (outgoing data) ---- */
2331
2332/* HCI Connection scheduler */
/* HCI Connection scheduler: pick the connection of @type that has
 * queued data and the fewest packets in flight, and report its fair
 * share of the controller's buffer credits in *@quote (0 when no
 * eligible connection exists).
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Credit pool depends on link type; LE shares the ACL
		 * pool when the controller has no dedicated LE buffers */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2391
Ville Tervobae1f5d92011-02-10 22:38:53 -03002392static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002393{
2394 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002395 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002396
Ville Tervobae1f5d92011-02-10 22:38:53 -03002397 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002398
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002399 rcu_read_lock();
2400
Linus Torvalds1da177e2005-04-16 15:20:36 -07002401 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002402 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002403 if (c->type == type && c->sent) {
2404 BT_ERR("%s killing stalled connection %s",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002405 hdev->name, batostr(&c->dst));
2406 hci_acl_disconn(c, 0x13);
2407 }
2408 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002409
2410 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002411}
2412
/* Channel scheduler: among all connections of @type, select the
 * channel whose head skb has the highest priority, breaking ties by
 * the fewest packets in flight on the owning connection.  *@quote
 * receives the channel's share of the controller buffer credits.
 * Returns NULL when no channel has queued data.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority found: restart the
				 * min-sent election at this level */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Credit pool depends on link type; LE shares the ACL pool when
	 * the controller has no dedicated LE buffers */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2491
/* Anti-starvation pass run after a scheduling round.
 *
 * For every channel of @type that transmitted nothing this round
 * (chan->sent == 0) but still has data queued, promote the head skb to
 * priority HCI_PRIO_MAX - 1 so it cannot be starved indefinitely by
 * higher-priority traffic.  Channels that did transmit simply get their
 * per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got serviced this round: reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) the promotion ceiling */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
				skb->priority);
		}

		/* Stop early once all connections of this type were seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2541
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002542static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2543{
2544 /* Calculate count of blocks used by this packet */
2545 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2546}
2547
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002548static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002550 if (!test_bit(HCI_RAW, &hdev->flags)) {
2551 /* ACL tx timeout must be longer than maximum
2552 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002553 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenkocc48dc02012-01-04 16:42:26 +02002554 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002555 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002556 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002557}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558
/* Packet-based ACL scheduler: transmit queued ACL frames for as long as
 * the controller reports free packet buffers (hdev->acl_cnt).  Channels
 * are elected by hci_chan_sent() and each gets a fair quote per round.
 */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Kill stalled links if buffers have been exhausted too long */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
				skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed; account per
			 * channel and per connection */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent this round: promote starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2596
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002597static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2598{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002599 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002600 struct hci_chan *chan;
2601 struct sk_buff *skb;
2602 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002603
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002604 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002605
2606 while (hdev->block_cnt > 0 &&
2607 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2608 u32 priority = (skb_peek(&chan->data_q))->priority;
2609 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2610 int blocks;
2611
2612 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2613 skb->len, skb->priority);
2614
2615 /* Stop if priority has changed */
2616 if (skb->priority < priority)
2617 break;
2618
2619 skb = skb_dequeue(&chan->data_q);
2620
2621 blocks = __get_blocks(hdev, skb);
2622 if (blocks > hdev->block_cnt)
2623 return;
2624
2625 hci_conn_enter_active_mode(chan->conn,
2626 bt_cb(skb)->force_active);
2627
2628 hci_send_frame(skb);
2629 hdev->acl_last_tx = jiffies;
2630
2631 hdev->block_cnt -= blocks;
2632 quote -= blocks;
2633
2634 chan->sent += blocks;
2635 chan->conn->sent += blocks;
2636 }
2637 }
2638
2639 if (cnt != hdev->block_cnt)
2640 hci_prio_recalculate(hdev, ACL_LINK);
2641}
2642
2643static inline void hci_sched_acl(struct hci_dev *hdev)
2644{
2645 BT_DBG("%s", hdev->name);
2646
2647 if (!hci_conn_num(hdev, ACL_LINK))
2648 return;
2649
2650 switch (hdev->flow_ctl_mode) {
2651 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2652 hci_sched_acl_pkt(hdev);
2653 break;
2654
2655 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2656 hci_sched_acl_blk(hdev);
2657 break;
2658 }
2659}
2660
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661/* Schedule SCO */
2662static inline void hci_sched_sco(struct hci_dev *hdev)
2663{
2664 struct hci_conn *conn;
2665 struct sk_buff *skb;
2666 int quote;
2667
2668 BT_DBG("%s", hdev->name);
2669
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002670 if (!hci_conn_num(hdev, SCO_LINK))
2671 return;
2672
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2674 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2675 BT_DBG("skb %p len %d", skb, skb->len);
2676 hci_send_frame(skb);
2677
2678 conn->sent++;
2679 if (conn->sent == ~0)
2680 conn->sent = 0;
2681 }
2682 }
2683}
2684
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002685static inline void hci_sched_esco(struct hci_dev *hdev)
2686{
2687 struct hci_conn *conn;
2688 struct sk_buff *skb;
2689 int quote;
2690
2691 BT_DBG("%s", hdev->name);
2692
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002693 if (!hci_conn_num(hdev, ESCO_LINK))
2694 return;
2695
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002696 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2697 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2698 BT_DBG("skb %p len %d", skb, skb->len);
2699 hci_send_frame(skb);
2700
2701 conn->sent++;
2702 if (conn->sent == ~0)
2703 conn->sent = 0;
2704 }
2705 }
2706}
2707
/* Schedule LE traffic.  Controllers that advertise no dedicated LE
 * buffer pool (le_pkts == 0) share the ACL pool, so the consumed budget
 * is written back to whichever counter it was borrowed from.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Borrow from the dedicated LE pool, or the ACL pool if none */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
				skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			/* One buffer consumed; account per channel/conn */
			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining budget to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent this round: promote starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2758
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002759static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002761 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762 struct sk_buff *skb;
2763
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002764 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2765 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002766
2767 /* Schedule queues and send stuff to HCI driver */
2768
2769 hci_sched_acl(hdev);
2770
2771 hci_sched_sco(hdev);
2772
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002773 hci_sched_esco(hdev);
2774
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002775 hci_sched_le(hdev);
2776
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777 /* Send next queued raw (unknown type) packet */
2778 while ((skb = skb_dequeue(&hdev->raw_q)))
2779 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780}
2781
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002782/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002783
2784/* ACL data packet */
2785static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2786{
2787 struct hci_acl_hdr *hdr = (void *) skb->data;
2788 struct hci_conn *conn;
2789 __u16 handle, flags;
2790
2791 skb_pull(skb, HCI_ACL_HDR_SIZE);
2792
2793 handle = __le16_to_cpu(hdr->handle);
2794 flags = hci_flags(handle);
2795 handle = hci_handle(handle);
2796
2797 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2798
2799 hdev->stat.acl_rx++;
2800
2801 hci_dev_lock(hdev);
2802 conn = hci_conn_hash_lookup_handle(hdev, handle);
2803 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002804
Linus Torvalds1da177e2005-04-16 15:20:36 -07002805 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002806 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002807
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002809 l2cap_recv_acldata(conn, skb, flags);
2810 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002811 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002812 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 hdev->name, handle);
2814 }
2815
2816 kfree_skb(skb);
2817}
2818
2819/* SCO data packet */
2820static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2821{
2822 struct hci_sco_hdr *hdr = (void *) skb->data;
2823 struct hci_conn *conn;
2824 __u16 handle;
2825
2826 skb_pull(skb, HCI_SCO_HDR_SIZE);
2827
2828 handle = __le16_to_cpu(hdr->handle);
2829
2830 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2831
2832 hdev->stat.sco_rx++;
2833
2834 hci_dev_lock(hdev);
2835 conn = hci_conn_hash_lookup_handle(hdev, handle);
2836 hci_dev_unlock(hdev);
2837
2838 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002840 sco_recv_scodata(conn, skb);
2841 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002843 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 hdev->name, handle);
2845 }
2846
2847 kfree_skb(skb);
2848}
2849
/* RX work item: drain hdev->rx_q and dispatch each packet by type.
 * Copies go to the monitor interface and (in promiscuous mode) to raw
 * sockets before the stack processes the frame itself.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: userspace owns the device, drop here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}
}
2904
/* CMD work item: send the next queued HCI command if the controller
 * has a free command credit (hdev->cmd_cnt).  A clone of the command
 * is kept in hdev->sent_cmd so the completion event can be matched.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is in flight;
			 * otherwise (re)arm the watchdog */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002935
2936int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2937{
2938 /* General inquiry access code (GIAC) */
2939 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2940 struct hci_cp_inquiry cp;
2941
2942 BT_DBG("%s", hdev->name);
2943
2944 if (test_bit(HCI_INQUIRY, &hdev->flags))
2945 return -EINPROGRESS;
2946
Johan Hedberg46632622012-01-02 16:06:08 +02002947 inquiry_cache_flush(hdev);
2948
Andre Guedes2519a1f2011-11-07 11:45:24 -03002949 memset(&cp, 0, sizeof(cp));
2950 memcpy(&cp.lap, lap, sizeof(cp.lap));
2951 cp.length = length;
2952
2953 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2954}
Andre Guedes023d50492011-11-04 14:16:52 -03002955
2956int hci_cancel_inquiry(struct hci_dev *hdev)
2957{
2958 BT_DBG("%s", hdev->name);
2959
2960 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002961 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002962
2963 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2964}