blob: d3fb986d6b27d5f07b10814dec73bd17c196a2f0 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
Johan Hedbergab81cbf2010-12-15 13:53:18 +020054#define AUTO_OFF_TIMEOUT 2000
55
/* Forward declarations for the per-device work items (defined later in
 * this file); they drive RX processing, command submission and TX. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list (all registered controllers), guarded by the rwlock */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list (protocol notifiers), guarded by the rwlock */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Forward a device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to listening
 * HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
/* Called when an HCI command completes.
 *
 * @hdev:   device the command was issued on
 * @cmd:    opcode of the completed command
 * @result: HCI status code of the completion
 *
 * During HCI_INIT only the completion of the last issued init command
 * (hdev->init_last_cmd) may finish the pending request; any other
 * completion is either ignored or treated as a spurious reset event
 * (see CSR workaround below).  Outside init, the synchronous request
 * started by __hci_request() is completed and its waiter woken.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Act only when the spurious completion is a reset and
		 * the command actually in flight was not itself a reset. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a copy of the stalled command at the head of
		 * the command queue; GFP_ATOMIC since this runs from the
		 * event processing path. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Complete the pending synchronous request, if any. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
/* Execute request and wait for completion.
 *
 * @hdev:    target device (req_lock must already be held by the caller)
 * @req:     callback that issues the HCI command(s)
 * @opt:     opaque argument passed through to @req
 * @timeout: maximum wait, in jiffies
 *
 * Returns 0 on success, -EINTR if interrupted by a signal, -ETIMEDOUT
 * on timeout, or a negative errno derived from the HCI status.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
				unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue and mark interruptible
	 * *before* issuing the request, so a fast completion cannot be
	 * missed between req() and schedule_timeout(). */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status into a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		/* Neither completed nor cancelled within the timeout. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
168
169static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
Szymon Janc01df8c32011-02-17 16:46:47 +0100170 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171{
172 int ret;
173
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200174 if (!test_bit(HCI_UP, &hdev->flags))
175 return -ENETDOWN;
176
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 /* Serialize all requests */
178 hci_req_lock(hdev);
179 ret = __hci_request(hdev, req, opt, timeout);
180 hci_req_unlock(hdev);
181
182 return ret;
183}
184
/* Request callback: issue an HCI Reset to the controller.  HCI_RESET is
 * set so the event path knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
193
/* Queue the BR/EDR controller init command sequence.  The commands are
 * only queued here; completions are handled asynchronously by the event
 * path.  The order follows the conventional BR/EDR bring-up sequence. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped on controllers that cannot handle it) */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all flag) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
245
/* Queue the init command sequence for an AMP (alternate MAC/PHY)
 * controller; much shorter than the BR/EDR sequence. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
259
/* Request callback run during hci_dev_open(): first flush any driver
 * supplied "special" init commands, then queue the init sequence that
 * matches the controller type. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: move driver-provided command skbs onto the
	 * regular command queue and kick the command work. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Type-specific initialization */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
293
/* Request callback: LE-specific initialization (buffer size query). */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
301
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
303{
304 __u8 scan = opt;
305
306 BT_DBG("%s %x", hdev->name, scan);
307
308 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200309 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310}
311
312static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
313{
314 __u8 auth = opt;
315
316 BT_DBG("%s %x", hdev->name, auth);
317
318 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200319 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700320}
321
322static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
323{
324 __u8 encrypt = opt;
325
326 BT_DBG("%s %x", hdev->name, encrypt);
327
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200328 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200329 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330}
331
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200332static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
333{
334 __le16 policy = cpu_to_le16(opt);
335
Marcel Holtmanna418b892008-11-30 12:17:28 +0100336 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200337
338 /* Default link policy */
339 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
340}
341
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900342/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343 * Device is held on return. */
344struct hci_dev *hci_dev_get(int index)
345{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200346 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700347
348 BT_DBG("%d", index);
349
350 if (index < 0)
351 return NULL;
352
353 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200354 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355 if (d->id == index) {
356 hdev = hci_dev_hold(d);
357 break;
358 }
359 }
360 read_unlock(&hci_dev_list_lock);
361 return hdev;
362}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363
364/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200365
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200366bool hci_discovery_active(struct hci_dev *hdev)
367{
368 struct discovery_state *discov = &hdev->discovery;
369
Andre Guedes6fbe1952012-02-03 17:47:58 -0300370 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300371 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300372 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200373 return true;
374
Andre Guedes6fbe1952012-02-03 17:47:58 -0300375 default:
376 return false;
377 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200378}
379
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the transitions user space cares about.  No-op when the
 * state is unchanged. */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually ran,
		 * so no "stopped discovering" event is sent. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
405
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406static void inquiry_cache_flush(struct hci_dev *hdev)
407{
Johan Hedberg30883512012-01-04 14:16:21 +0200408 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200409 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700410
Johan Hedberg561aafb2012-01-04 13:31:59 +0200411 list_for_each_entry_safe(p, n, &cache->all, all) {
412 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200413 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200415
416 INIT_LIST_HEAD(&cache->unknown);
417 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700418}
419
420struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
421{
Johan Hedberg30883512012-01-04 14:16:21 +0200422 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700423 struct inquiry_entry *e;
424
425 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
426
Johan Hedberg561aafb2012-01-04 13:31:59 +0200427 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700428 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200429 return e;
430 }
431
432 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433}
434
Johan Hedberg561aafb2012-01-04 13:31:59 +0200435struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300436 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200437{
Johan Hedberg30883512012-01-04 14:16:21 +0200438 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200439 struct inquiry_entry *e;
440
441 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
442
443 list_for_each_entry(e, &cache->unknown, list) {
444 if (!bacmp(&e->data.bdaddr, bdaddr))
445 return e;
446 }
447
448 return NULL;
449}
450
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200451struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300452 bdaddr_t *bdaddr,
453 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200454{
455 struct discovery_state *cache = &hdev->discovery;
456 struct inquiry_entry *e;
457
458 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
459
460 list_for_each_entry(e, &cache->resolve, list) {
461 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
462 return e;
463 if (!bacmp(&e->data.bdaddr, bdaddr))
464 return e;
465 }
466
467 return NULL;
468}
469
/* Re-insert @ie into the resolve list so the list stays ordered by
 * signal strength (stronger |rssi| first), while entries whose name
 * resolution is already in flight (NAME_PENDING) keep their position
 * at the front. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
						struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Walk until we hit a non-pending entry that is not stronger
	 * than @ie; 'pos' trails the last entry we should insert after. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
				abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
488
/* Insert or refresh an inquiry-cache entry for an inquiry result.
 *
 * @hdev:       device the result arrived on
 * @data:       inquiry data from the controller
 * @name_known: whether the remote name is already known
 * @ssp:        out parameter, set to the (possibly cached) SSP mode;
 *              may be NULL
 *
 * Returns true when the entry's name is known (no name resolution
 * needed), false when the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
				bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* A previously cached SSP capability sticks even if the
		 * current result does not carry it. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while waiting to resolve the name: keep
		 * the resolve list ordered by signal strength. */
		if (ie->name_state == NAME_NEEDED &&
						data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote the entry to NAME_KNOWN and drop it from whichever
	 * secondary list (unknown/resolve) it was on, unless resolution
	 * is already in flight. */
	if (name_known && ie->name_state != NAME_KNOWN &&
					ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
544
545static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
546{
Johan Hedberg30883512012-01-04 14:16:21 +0200547 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 struct inquiry_info *info = (struct inquiry_info *) buf;
549 struct inquiry_entry *e;
550 int copied = 0;
551
Johan Hedberg561aafb2012-01-04 13:31:59 +0200552 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200554
555 if (copied >= num)
556 break;
557
Linus Torvalds1da177e2005-04-16 15:20:36 -0700558 bacpy(&info->bdaddr, &data->bdaddr);
559 info->pscan_rep_mode = data->pscan_rep_mode;
560 info->pscan_period_mode = data->pscan_period_mode;
561 info->pscan_mode = data->pscan_mode;
562 memcpy(info->dev_class, data->dev_class, 3);
563 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200564
Linus Torvalds1da177e2005-04-16 15:20:36 -0700565 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200566 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700567 }
568
569 BT_DBG("cache %p, copied %d", cache, copied);
570 return copied;
571}
572
/* Request callback: start an inquiry.  @opt is actually a pointer to a
 * struct hci_inquiry_req smuggled through the unsigned long. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	/* An inquiry is already running; don't start a second one. */
	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
589
/* HCIINQUIRY ioctl handler: run (or reuse a cached) inquiry and copy
 * the results back to user space.
 *
 * @arg: user pointer to a struct hci_inquiry_req followed by space for
 *       the inquiry_info results.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A fresh inquiry is needed when the cache is stale or empty,
	 * or when the caller explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28 s; 2000 ms is a safe upper bound
	 * per unit for the request timeout. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header first, then the results
	 * immediately after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
655
656/* ---- HCI ioctl helpers ---- */
657
/* Bring an HCI device up: open the transport, run the init command
 * sequence (unless the device is raw), and announce the device as UP.
 *
 * @dev: device index
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init-request error).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered: refuse to open. */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Respect rfkill: blocked radios must stay down. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): if the BR/EDR init request failed but the
		 * LE init request succeeds, its result overwrites 'ret'
		 * and masks the earlier failure — confirm this is
		 * intended. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt announces power state itself. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
749
/* Tear an HCI device down: cancel pending work, flush caches and
 * queues, optionally reset the controller, and close the transport.
 * The ordering of the steps below is deliberate: works are flushed
 * before queues are purged, and queues are empty before close().
 * Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A timed discoverable mode is cancelled on power down. */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device: controllers with the NO_RESET quirk were not
	 * reset during init, so reset them here on the way down. */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Auto-off shutdowns don't notify mgmt about power state. */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
837
838int hci_dev_close(__u16 dev)
839{
840 struct hci_dev *hdev;
841 int err;
842
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200843 hdev = hci_dev_get(dev);
844 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700845 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100846
847 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
848 cancel_delayed_work(&hdev->power_off);
849
Linus Torvalds1da177e2005-04-16 15:20:36 -0700850 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100851
Linus Torvalds1da177e2005-04-16 15:20:36 -0700852 hci_dev_put(hdev);
853 return err;
854}
855
856int hci_dev_reset(__u16 dev)
857{
858 struct hci_dev *hdev;
859 int ret = 0;
860
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200861 hdev = hci_dev_get(dev);
862 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700863 return -ENODEV;
864
865 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866
867 if (!test_bit(HCI_UP, &hdev->flags))
868 goto done;
869
870 /* Drop queues */
871 skb_queue_purge(&hdev->rx_q);
872 skb_queue_purge(&hdev->cmd_q);
873
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300874 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700875 inquiry_cache_flush(hdev);
876 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300877 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878
879 if (hdev->flush)
880 hdev->flush(hdev);
881
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900882 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300883 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700884
885 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200886 ret = __hci_request(hdev, hci_reset_req, 0,
887 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700888
889done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700890 hci_req_unlock(hdev);
891 hci_dev_put(hdev);
892 return ret;
893}
894
895int hci_dev_reset_stat(__u16 dev)
896{
897 struct hci_dev *hdev;
898 int ret = 0;
899
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200900 hdev = hci_dev_get(dev);
901 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 return -ENODEV;
903
904 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
905
906 hci_dev_put(hdev);
907
908 return ret;
909}
910
/* Handle HCI device-control ioctls (HCISETAUTH, HCISETSCAN, ...).
 * Copies a struct hci_dev_req from user space; settings that need
 * controller interaction go through hci_request() with the standard
 * init timeout, the rest are plain field updates.
 * Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 halves: index 1 is the MTU,
		 * index 0 is the packet count */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* same packing as HCISETACLMTU, for SCO */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
985
/* Copy the list of registered HCI devices (id + flags pairs) back to
 * user space.  The requested count is capped at two pages' worth of
 * entries.  As a side effect, a pending auto power-off is cancelled
 * and controllers not managed via mgmt are marked pairable.
 * Returns 0, -EFAULT, -EINVAL or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Sanity-bound the allocation driven by the user-supplied count */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the number of devices actually found */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1032
1033int hci_get_dev_info(void __user *arg)
1034{
1035 struct hci_dev *hdev;
1036 struct hci_dev_info di;
1037 int err = 0;
1038
1039 if (copy_from_user(&di, arg, sizeof(di)))
1040 return -EFAULT;
1041
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001042 hdev = hci_dev_get(di.dev_id);
1043 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044 return -ENODEV;
1045
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001046 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001047 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001048
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001049 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1050 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001051
Linus Torvalds1da177e2005-04-16 15:20:36 -07001052 strcpy(di.name, hdev->name);
1053 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001054 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001055 di.flags = hdev->flags;
1056 di.pkt_type = hdev->pkt_type;
1057 di.acl_mtu = hdev->acl_mtu;
1058 di.acl_pkts = hdev->acl_pkts;
1059 di.sco_mtu = hdev->sco_mtu;
1060 di.sco_pkts = hdev->sco_pkts;
1061 di.link_policy = hdev->link_policy;
1062 di.link_mode = hdev->link_mode;
1063
1064 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1065 memcpy(&di.features, &hdev->features, sizeof(di.features));
1066
1067 if (copy_to_user(arg, &di, sizeof(di)))
1068 err = -EFAULT;
1069
1070 hci_dev_put(hdev);
1071
1072 return err;
1073}
1074
1075/* ---- Interface to HCI drivers ---- */
1076
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001077static int hci_rfkill_set_block(void *data, bool blocked)
1078{
1079 struct hci_dev *hdev = data;
1080
1081 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1082
1083 if (!blocked)
1084 return 0;
1085
1086 hci_dev_do_close(hdev);
1087
1088 return 0;
1089}
1090
/* rfkill callbacks for HCI controllers; only blocking is handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1094
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001095static void hci_power_on(struct work_struct *work)
1096{
1097 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1098
1099 BT_DBG("%s", hdev->name);
1100
1101 if (hci_dev_open(hdev->id) < 0)
1102 return;
1103
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001104 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Gustavo F. Padovan80b7ab32011-12-17 14:52:27 -02001105 schedule_delayed_work(&hdev->power_off,
Johan Hedberg32435532011-11-07 22:16:04 +02001106 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001107
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001108 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001109 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001110}
1111
1112static void hci_power_off(struct work_struct *work)
1113{
Johan Hedberg32435532011-11-07 22:16:04 +02001114 struct hci_dev *hdev = container_of(work, struct hci_dev,
1115 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001116
1117 BT_DBG("%s", hdev->name);
1118
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001119 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001120}
1121
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001122static void hci_discov_off(struct work_struct *work)
1123{
1124 struct hci_dev *hdev;
1125 u8 scan = SCAN_PAGE;
1126
1127 hdev = container_of(work, struct hci_dev, discov_off.work);
1128
1129 BT_DBG("%s", hdev->name);
1130
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001131 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001132
1133 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1134
1135 hdev->discov_timeout = 0;
1136
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001137 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001138}
1139
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001140int hci_uuids_clear(struct hci_dev *hdev)
1141{
1142 struct list_head *p, *n;
1143
1144 list_for_each_safe(p, n, &hdev->uuids) {
1145 struct bt_uuid *uuid;
1146
1147 uuid = list_entry(p, struct bt_uuid, list);
1148
1149 list_del(p);
1150 kfree(uuid);
1151 }
1152
1153 return 0;
1154}
1155
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001156int hci_link_keys_clear(struct hci_dev *hdev)
1157{
1158 struct list_head *p, *n;
1159
1160 list_for_each_safe(p, n, &hdev->link_keys) {
1161 struct link_key *key;
1162
1163 key = list_entry(p, struct link_key, list);
1164
1165 list_del(p);
1166 kfree(key);
1167 }
1168
1169 return 0;
1170}
1171
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001172int hci_smp_ltks_clear(struct hci_dev *hdev)
1173{
1174 struct smp_ltk *k, *tmp;
1175
1176 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1177 list_del(&k->list);
1178 kfree(k);
1179 }
1180
1181 return 0;
1182}
1183
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001184struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1185{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001186 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001187
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001188 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001189 if (bacmp(bdaddr, &k->bdaddr) == 0)
1190 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001191
1192 return NULL;
1193}
1194
/* Decide whether a newly created link key should be stored persistently,
 * based on the key type and the authentication requirements of both
 * sides of the connection (when one exists).  Returns true when the key
 * may be kept across power cycles.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1230
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001231struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001232{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001233 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001234
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001238 continue;
1239
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001240 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001241 }
1242
1243 return NULL;
1244}
1245EXPORT_SYMBOL(hci_find_ltk);
1246
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001247struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001248 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001249{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001250 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001251
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001252 list_for_each_entry(k, &hdev->long_term_keys, list)
1253 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001255 return k;
1256
1257 return NULL;
1258}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001259EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001260
/* Store (or update) the BR/EDR link key for @bdaddr.
 * @new_key distinguishes a freshly generated key from a re-load; only
 * new keys are reported to the management core.  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address when there is one;
	 * otherwise allocate and link a new one. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) &&
	    old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1314
/* Store (or update) an SMP key for @bdaddr/@addr_type.  Only keys
 * flagged as STK or LTK are accepted; anything else is silently
 * ignored.  When @new_key is set, genuine LTKs (not ephemeral STKs)
 * are reported to the management core.  Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1351
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001352int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1353{
1354 struct link_key *key;
1355
1356 key = hci_find_link_key(hdev, bdaddr);
1357 if (!key)
1358 return -ENOENT;
1359
1360 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1361
1362 list_del(&key->list);
1363 kfree(key);
1364
1365 return 0;
1366}
1367
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001368int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1369{
1370 struct smp_ltk *k, *tmp;
1371
1372 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1373 if (bacmp(bdaddr, &k->bdaddr))
1374 continue;
1375
1376 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1377
1378 list_del(&k->list);
1379 kfree(k);
1380 }
1381
1382 return 0;
1383}
1384
Ville Tervo6bd32322011-02-16 16:32:41 +02001385/* HCI command timer function */
1386static void hci_cmd_timer(unsigned long arg)
1387{
1388 struct hci_dev *hdev = (void *) arg;
1389
1390 BT_ERR("%s command tx timeout", hdev->name);
1391 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001392 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001393}
1394
Szymon Janc2763eda2011-03-22 13:12:22 +01001395struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001396 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001397{
1398 struct oob_data *data;
1399
1400 list_for_each_entry(data, &hdev->remote_oob_data, list)
1401 if (bacmp(bdaddr, &data->bdaddr) == 0)
1402 return data;
1403
1404 return NULL;
1405}
1406
1407int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1408{
1409 struct oob_data *data;
1410
1411 data = hci_find_remote_oob_data(hdev, bdaddr);
1412 if (!data)
1413 return -ENOENT;
1414
1415 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1416
1417 list_del(&data->list);
1418 kfree(data);
1419
1420 return 0;
1421}
1422
1423int hci_remote_oob_data_clear(struct hci_dev *hdev)
1424{
1425 struct oob_data *data, *n;
1426
1427 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1428 list_del(&data->list);
1429 kfree(data);
1430 }
1431
1432 return 0;
1433}
1434
1435int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001436 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001437{
1438 struct oob_data *data;
1439
1440 data = hci_find_remote_oob_data(hdev, bdaddr);
1441
1442 if (!data) {
1443 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1444 if (!data)
1445 return -ENOMEM;
1446
1447 bacpy(&data->bdaddr, bdaddr);
1448 list_add(&data->list, &hdev->remote_oob_data);
1449 }
1450
1451 memcpy(data->hash, hash, sizeof(data->hash));
1452 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1453
1454 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1455
1456 return 0;
1457}
1458
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001459struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001460{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001461 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001462
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001463 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001464 if (bacmp(bdaddr, &b->bdaddr) == 0)
1465 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001466
1467 return NULL;
1468}
1469
1470int hci_blacklist_clear(struct hci_dev *hdev)
1471{
1472 struct list_head *p, *n;
1473
1474 list_for_each_safe(p, n, &hdev->blacklist) {
1475 struct bdaddr_list *b;
1476
1477 b = list_entry(p, struct bdaddr_list, list);
1478
1479 list_del(p);
1480 kfree(b);
1481 }
1482
1483 return 0;
1484}
1485
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001486int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001487{
1488 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001489
1490 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1491 return -EBADF;
1492
Antti Julku5e762442011-08-25 16:48:02 +03001493 if (hci_blacklist_lookup(hdev, bdaddr))
1494 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001495
1496 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001497 if (!entry)
1498 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001499
1500 bacpy(&entry->bdaddr, bdaddr);
1501
1502 list_add(&entry->list, &hdev->blacklist);
1503
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001504 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001505}
1506
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001507int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001508{
1509 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001510
Szymon Janc1ec918c2011-11-16 09:32:21 +01001511 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001512 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001513
1514 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001515 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001516 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001517
1518 list_del(&entry->list);
1519 kfree(entry);
1520
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001521 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001522}
1523
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001524static void hci_clear_adv_cache(struct work_struct *work)
Andre Guedes35815082011-05-26 16:23:53 -03001525{
Gustavo F. Padovandb323f22011-06-20 16:39:29 -03001526 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001527 adv_work.work);
Andre Guedes35815082011-05-26 16:23:53 -03001528
1529 hci_dev_lock(hdev);
1530
1531 hci_adv_entries_clear(hdev);
1532
1533 hci_dev_unlock(hdev);
1534}
1535
Andre Guedes76c86862011-05-26 16:23:50 -03001536int hci_adv_entries_clear(struct hci_dev *hdev)
1537{
1538 struct adv_entry *entry, *tmp;
1539
1540 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1541 list_del(&entry->list);
1542 kfree(entry);
1543 }
1544
1545 BT_DBG("%s adv cache cleared", hdev->name);
1546
1547 return 0;
1548}
1549
1550struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1551{
1552 struct adv_entry *entry;
1553
1554 list_for_each_entry(entry, &hdev->adv_entries, list)
1555 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1556 return entry;
1557
1558 return NULL;
1559}
1560
1561static inline int is_connectable_adv(u8 evt_type)
1562{
1563 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1564 return 1;
1565
1566 return 0;
1567}
1568
1569int hci_add_adv_entry(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001570 struct hci_ev_le_advertising_info *ev) { struct adv_entry *entry; if (!is_connectable_adv(ev->evt_type))
Andre Guedes76c86862011-05-26 16:23:50 -03001571 return -EINVAL;
1572
1573 /* Only new entries should be added to adv_entries. So, if
1574 * bdaddr was found, don't add it. */
1575 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1576 return 0;
1577
Andre Guedes4777bfd2012-01-30 23:31:28 -03001578 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
Andre Guedes76c86862011-05-26 16:23:50 -03001579 if (!entry)
1580 return -ENOMEM;
1581
1582 bacpy(&entry->bdaddr, &ev->bdaddr);
1583 entry->bdaddr_type = ev->bdaddr_type;
1584
1585 list_add(&entry->list, &hdev->adv_entries);
1586
1587 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1588 batostr(&entry->bdaddr), entry->bdaddr_type);
1589
1590 return 0;
1591}
1592
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001593static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1594{
1595 struct le_scan_params *param = (struct le_scan_params *) opt;
1596 struct hci_cp_le_set_scan_param cp;
1597
1598 memset(&cp, 0, sizeof(cp));
1599 cp.type = param->type;
1600 cp.interval = cpu_to_le16(param->interval);
1601 cp.window = cpu_to_le16(param->window);
1602
1603 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1604}
1605
1606static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1607{
1608 struct hci_cp_le_set_scan_enable cp;
1609
1610 memset(&cp, 0, sizeof(cp));
1611 cp.enable = 1;
1612
1613 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1614}
1615
/* Start an LE scan synchronously: program the scan parameters, enable
 * scanning, and arm the delayed work that disables it after @timeout
 * milliseconds.  Returns 0, -EINPROGRESS if a scan is already running,
 * or the error from the HCI request.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-request timeout */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be accepted before scanning is enabled */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1649
Andre Guedes7dbfac12012-03-15 16:52:07 -03001650int hci_cancel_le_scan(struct hci_dev *hdev)
1651{
1652 BT_DBG("%s", hdev->name);
1653
1654 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1655 return -EALREADY;
1656
1657 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1658 struct hci_cp_le_set_scan_enable cp;
1659
1660 /* Send HCI command to disable LE Scan */
1661 memset(&cp, 0, sizeof(cp));
1662 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1663 }
1664
1665 return 0;
1666}
1667
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001668static void le_scan_disable_work(struct work_struct *work)
1669{
1670 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001671 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001672 struct hci_cp_le_set_scan_enable cp;
1673
1674 BT_DBG("%s", hdev->name);
1675
1676 memset(&cp, 0, sizeof(cp));
1677
1678 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1679}
1680
Andre Guedes28b75a82012-02-03 17:48:00 -03001681static void le_scan_work(struct work_struct *work)
1682{
1683 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1684 struct le_scan_params *param = &hdev->le_scan_params;
1685
1686 BT_DBG("%s", hdev->name);
1687
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001688 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1689 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001690}
1691
1692int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001693 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001694{
1695 struct le_scan_params *param = &hdev->le_scan_params;
1696
1697 BT_DBG("%s", hdev->name);
1698
1699 if (work_busy(&hdev->le_scan))
1700 return -EINPROGRESS;
1701
1702 param->type = type;
1703 param->interval = interval;
1704 param->window = window;
1705 param->timeout = timeout;
1706
1707 queue_work(system_long_wq, &hdev->le_scan);
1708
1709 return 0;
1710}
1711
David Herrmann9be0dab2012-04-22 14:39:57 +02001712/* Alloc HCI device */
1713struct hci_dev *hci_alloc_dev(void)
1714{
1715 struct hci_dev *hdev;
1716
1717 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1718 if (!hdev)
1719 return NULL;
1720
1721 hci_init_sysfs(hdev);
1722 skb_queue_head_init(&hdev->driver_init);
1723
1724 return hdev;
1725}
1726EXPORT_SYMBOL(hci_alloc_dev);
1727
/* Free HCI device.
 * Drops any frames still sitting in the driver_init queue and releases
 * the device reference; the hci_dev itself is freed by the driver-model
 * release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1737
/* Register HCI device.
 * Allocates the lowest free device id, initializes all per-device
 * state (queues, work items, lists, timers), creates the per-device
 * workqueue and sysfs entries, optionally registers rfkill, and
 * schedules the initial power-on.
 *
 * Returns the assigned device id on success or a negative errno; on
 * failure the device is removed from hci_dev_list again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head, *p;
	int i, id, error;

	/* A usable driver must provide at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	write_lock(&hci_dev_list_lock);

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
	head = &hci_dev_list;

	/* Find first available device id; the list is kept sorted by id,
	 * and `head` tracks the node after which we must insert. */
	list_for_each(p, &hci_dev_list) {
		int nid = list_entry(p, struct hci_dev, list)->id;
		if (nid > id)
			break;
		if (nid == id)
			id++;
		head = p;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	list_add(&hdev->list, head);

	mutex_init(&hdev->lock);

	/* Default controller state and link policy */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	/* Sniff-mode defaults (slots); idle timeout disabled */
	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue for this device's work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration is best-effort: failure just disables it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1874
/* Unregister HCI device.
 * Removes the device from the global list, shuts it down, tears down
 * mgmt/rfkill/sysfs state, flushes pending work, clears all per-device
 * lists and drops the reference taken by hci_register_dev().
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device so concurrent paths can see it is going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Notify mgmt unless the device never finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
				!test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Must complete before the workqueue is destroyed below */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference held since hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1929
/* Suspend HCI device.
 * Only broadcasts the HCI_DEV_SUSPEND notification; no device state is
 * changed here. Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1937
/* Resume HCI device.
 * Only broadcasts the HCI_DEV_RESUME notification; no device state is
 * changed here. Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1945
/* Receive frame from HCI drivers.
 * Tags and timestamps the skb, queues it on the device rx queue and
 * kicks the rx work. Frames arriving while the device is neither up
 * nor initializing are dropped with -ENXIO.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1968
/* Reassemble a (possibly partial) HCI packet of @type from @count bytes
 * at @data, using the per-device reassembly slot @index to hold state
 * across calls.
 *
 * Returns the number of input bytes NOT consumed (>= 0), -EILSEQ for an
 * invalid type/index, or -ENOMEM on allocation failure or if the packet
 * header announces a payload larger than the preallocated buffer.
 * A completed frame is handed off to hci_recv_frame().
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate the maximum-size buffer
		 * for this packet type and expect its header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and verify it fits in the allocated buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2077
Marcel Holtmannef222012007-07-11 06:42:04 +02002078int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2079{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302080 int rem = 0;
2081
Marcel Holtmannef222012007-07-11 06:42:04 +02002082 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2083 return -EILSEQ;
2084
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002085 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002086 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302087 if (rem < 0)
2088 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002089
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302090 data += (count - rem);
2091 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002092 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002093
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302094 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002095}
2096EXPORT_SYMBOL(hci_recv_fragment);
2097
Suraj Sumangala99811512010-07-14 13:02:19 +05302098#define STREAM_REASSEMBLY 0
2099
2100int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2101{
2102 int type;
2103 int rem = 0;
2104
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002105 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302106 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2107
2108 if (!skb) {
2109 struct { char type; } *pkt;
2110
2111 /* Start of the frame */
2112 pkt = data;
2113 type = pkt->type;
2114
2115 data++;
2116 count--;
2117 } else
2118 type = bt_cb(skb)->pkt_type;
2119
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002120 rem = hci_reassembly(hdev, type, data, count,
2121 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302122 if (rem < 0)
2123 return rem;
2124
2125 data += (count - rem);
2126 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002127 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302128
2129 return rem;
2130}
2131EXPORT_SYMBOL(hci_recv_stream_fragment);
2132
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133/* ---- Interface to upper protocols ---- */
2134
/* Register an upper-protocol callback structure on the global
 * hci_cb_list (protected by hci_cb_list_lock). Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2146
/* Remove a previously registered upper-protocol callback structure
 * from the global hci_cb_list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2158
/* Hand one outgoing frame to the driver.
 * Timestamps the skb, mirrors it to the monitor channel (and to raw
 * sockets when someone is in promiscuous mode), then calls the driver's
 * send() hook. Returns the driver's result, or -ENODEV if the skb has
 * no device attached.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2186
/* Send HCI command.
 * Builds an HCI command packet with the given @opcode and @plen bytes
 * of parameters from @param, queues it on the command queue and kicks
 * the command work. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header: little-endian opcode + parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during initialization */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002222
2223/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002224void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225{
2226 struct hci_command_hdr *hdr;
2227
2228 if (!hdev->sent_cmd)
2229 return NULL;
2230
2231 hdr = (void *) hdev->sent_cmd->data;
2232
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002233 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234 return NULL;
2235
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002236 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237
2238 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2239}
2240
2241/* Send ACL data */
2242static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2243{
2244 struct hci_acl_hdr *hdr;
2245 int len = skb->len;
2246
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002247 skb_push(skb, HCI_ACL_HDR_SIZE);
2248 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002249 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002250 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2251 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002252}
2253
/* Queue an ACL skb (and any fragments hanging off its frag_list) on
 * @queue. The head skb already carries its ACL header; each fragment
 * gets its own header with ACL_START replaced by ACL_CONT. All
 * fragments are enqueued atomically under the queue lock so the TX
 * scheduler never sees a partial frame.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
						struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragments; they are queued individually */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments use ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2294
/* Send ACL data on @chan: add the ACL header, queue the skb (with any
 * fragments) on the channel's data queue and kick the TX work.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2311
/* Send SCO data.
 * Builds the SCO header (connection handle + payload length) in front
 * of the skb, queues it on the connection's data queue and kicks the
 * TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	/* Copy the header into the freshly pushed headroom */
	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_sco);
2334
2335/* ---- HCI TX task (outgoing data) ---- */
2336
2337/* HCI Connection scheduler */
/* TX scheduler: pick the connection of @type with pending data and the
 * fewest in-flight packets, and compute its fair-share *quote of the
 * available controller buffers (at least 1). *quote is 0 when no
 * eligible connection exists.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip connections of other types or with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer budget matching the link type; LE falls
		 * back to the ACL budget on shared-buffer controllers. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2396
/* TX timeout handler: disconnect every connection of @type that still
 * has unacked packets, since the controller stopped returning
 * completed-packet credits for them.
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			/* 0x13: Remote User Terminated Connection */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2417
/* TX scheduler (channel granularity): among all channels of @type with
 * queued data, consider only those whose head skb has the highest
 * priority seen so far, and pick the one whose connection has the
 * fewest in-flight packets. Computes the fair-share *quote as in
 * hci_low_sent(). Returns NULL when nothing is eligible.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the queue head's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the selection */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer budget by link type; LE falls back to ACL buffers on
	 * shared-buffer controllers. */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2496
/* Anti-starvation pass, run after a scheduling round that consumed
 * credits (see hci_sched_acl_pkt/_blk and hci_sched_le).
 *
 * For each connected link of @type: channels that transmitted this round
 * (chan->sent != 0) just get their per-round counter cleared, while
 * channels that were skipped get the head skb of their queue promoted to
 * HCI_PRIO_MAX - 1 so they win the next hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: reset counter,
			 * no promotion needed */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			/* Already at (or above) the promotion level */
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* Every connection of this type has been visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2546
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002547static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2548{
2549 /* Calculate count of blocks used by this packet */
2550 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2551}
2552
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002553static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002554{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 if (!test_bit(HCI_RAW, &hdev->flags)) {
2556 /* ACL tx timeout must be longer than maximum
2557 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002558 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenkocc48dc02012-01-04 16:42:26 +02002559 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002560 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002562}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563
/* Packet-based ACL scheduler (HCI_FLOW_CTL_MODE_PACKET_BASED).
 *
 * Repeatedly asks hci_chan_sent() for the highest-priority ACL channel
 * and sends up to its quota of packets, stopping early when a lower
 * priority skb reaches the head of the queue.  If any credits were
 * consumed, hci_prio_recalculate() promotes channels that were skipped.
 */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	/* Snapshot of credits to detect whether anything was sent */
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head skb when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Only dequeue once we are committed to sending */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2601
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002602static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2603{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002604 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002605 struct hci_chan *chan;
2606 struct sk_buff *skb;
2607 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002608
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002609 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002610
2611 while (hdev->block_cnt > 0 &&
2612 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2613 u32 priority = (skb_peek(&chan->data_q))->priority;
2614 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2615 int blocks;
2616
2617 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2618 skb->len, skb->priority);
2619
2620 /* Stop if priority has changed */
2621 if (skb->priority < priority)
2622 break;
2623
2624 skb = skb_dequeue(&chan->data_q);
2625
2626 blocks = __get_blocks(hdev, skb);
2627 if (blocks > hdev->block_cnt)
2628 return;
2629
2630 hci_conn_enter_active_mode(chan->conn,
2631 bt_cb(skb)->force_active);
2632
2633 hci_send_frame(skb);
2634 hdev->acl_last_tx = jiffies;
2635
2636 hdev->block_cnt -= blocks;
2637 quote -= blocks;
2638
2639 chan->sent += blocks;
2640 chan->conn->sent += blocks;
2641 }
2642 }
2643
2644 if (cnt != hdev->block_cnt)
2645 hci_prio_recalculate(hdev, ACL_LINK);
2646}
2647
2648static inline void hci_sched_acl(struct hci_dev *hdev)
2649{
2650 BT_DBG("%s", hdev->name);
2651
2652 if (!hci_conn_num(hdev, ACL_LINK))
2653 return;
2654
2655 switch (hdev->flow_ctl_mode) {
2656 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2657 hci_sched_acl_pkt(hdev);
2658 break;
2659
2660 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2661 hci_sched_acl_blk(hdev);
2662 break;
2663 }
2664}
2665
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666/* Schedule SCO */
2667static inline void hci_sched_sco(struct hci_dev *hdev)
2668{
2669 struct hci_conn *conn;
2670 struct sk_buff *skb;
2671 int quote;
2672
2673 BT_DBG("%s", hdev->name);
2674
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002675 if (!hci_conn_num(hdev, SCO_LINK))
2676 return;
2677
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2679 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2680 BT_DBG("skb %p len %d", skb, skb->len);
2681 hci_send_frame(skb);
2682
2683 conn->sent++;
2684 if (conn->sent == ~0)
2685 conn->sent = 0;
2686 }
2687 }
2688}
2689
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002690static inline void hci_sched_esco(struct hci_dev *hdev)
2691{
2692 struct hci_conn *conn;
2693 struct sk_buff *skb;
2694 int quote;
2695
2696 BT_DBG("%s", hdev->name);
2697
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002698 if (!hci_conn_num(hdev, ESCO_LINK))
2699 return;
2700
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002701 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2702 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2703 BT_DBG("skb %p len %d", skb, skb->len);
2704 hci_send_frame(skb);
2705
2706 conn->sent++;
2707 if (conn->sent == ~0)
2708 conn->sent = 0;
2709 }
2710 }
2711}
2712
/* LE data scheduler.
 *
 * Works from the LE credit pool when the controller has dedicated LE
 * buffers (le_pkts != 0), otherwise borrows from the ACL pool, writing
 * the remaining credits back to the matching counter at the end.  Sends
 * per-channel quotas from hci_chan_sent() and promotes starved channels
 * via hci_prio_recalculate() when anything was transmitted.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE pool if present, else share ACL credits */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	/* Remember the starting credits to detect progress below */
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head skb when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the leftover credits back to the pool we drew from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2763
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002764static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002766 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 struct sk_buff *skb;
2768
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002769 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2770 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771
2772 /* Schedule queues and send stuff to HCI driver */
2773
2774 hci_sched_acl(hdev);
2775
2776 hci_sched_sco(hdev);
2777
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002778 hci_sched_esco(hdev);
2779
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002780 hci_sched_le(hdev);
2781
Linus Torvalds1da177e2005-04-16 15:20:36 -07002782 /* Send next queued raw (unknown type) packet */
2783 while ((skb = skb_dequeue(&hdev->raw_q)))
2784 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002785}
2786
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002787/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788
2789/* ACL data packet */
2790static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2791{
2792 struct hci_acl_hdr *hdr = (void *) skb->data;
2793 struct hci_conn *conn;
2794 __u16 handle, flags;
2795
2796 skb_pull(skb, HCI_ACL_HDR_SIZE);
2797
2798 handle = __le16_to_cpu(hdr->handle);
2799 flags = hci_flags(handle);
2800 handle = hci_handle(handle);
2801
2802 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2803
2804 hdev->stat.acl_rx++;
2805
2806 hci_dev_lock(hdev);
2807 conn = hci_conn_hash_lookup_handle(hdev, handle);
2808 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002809
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002811 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002812
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002814 l2cap_recv_acldata(conn, skb, flags);
2815 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002817 BT_ERR("%s ACL packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 hdev->name, handle);
2819 }
2820
2821 kfree_skb(skb);
2822}
2823
2824/* SCO data packet */
2825static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2826{
2827 struct hci_sco_hdr *hdr = (void *) skb->data;
2828 struct hci_conn *conn;
2829 __u16 handle;
2830
2831 skb_pull(skb, HCI_SCO_HDR_SIZE);
2832
2833 handle = __le16_to_cpu(hdr->handle);
2834
2835 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2836
2837 hdev->stat.sco_rx++;
2838
2839 hci_dev_lock(hdev);
2840 conn = hci_conn_hash_lookup_handle(hdev, handle);
2841 hci_dev_unlock(hdev);
2842
2843 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002844 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002845 sco_recv_scodata(conn, skb);
2846 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002848 BT_ERR("%s SCO packet for unknown connection handle %d",
Linus Torvalds1da177e2005-04-16 15:20:36 -07002849 hdev->name, handle);
2850 }
2851
2852 kfree_skb(skb);
2853}
2854
/* RX work item: drain hdev->rx_q and dispatch each packet by type.
 *
 * Every packet is first mirrored to the HCI monitor; if the device is
 * in promiscuous mode it is also copied to HCI sockets.  Raw-mode
 * devices get no further stack processing, and while HCI_INIT is set
 * data packets (ACL/SCO) are discarded so only events flow during
 * controller initialization.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw device: the stack does not interpret traffic */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
2909
/* Command work item: send the next queued HCI command if the controller
 * has a free command slot (cmd_cnt > 0).
 *
 * A clone of the outgoing command is kept in hdev->sent_cmd so the
 * completion path can match it.  On successful send the command timeout
 * timer is (re)armed -- except during HCI_RESET, where it is cancelled.
 * If cloning fails the command is requeued and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously sent command's saved copy */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				/* No timeout while a reset is pending */
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: retry this command later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002940
2941int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2942{
2943 /* General inquiry access code (GIAC) */
2944 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2945 struct hci_cp_inquiry cp;
2946
2947 BT_DBG("%s", hdev->name);
2948
2949 if (test_bit(HCI_INQUIRY, &hdev->flags))
2950 return -EINPROGRESS;
2951
Johan Hedberg46632622012-01-02 16:06:08 +02002952 inquiry_cache_flush(hdev);
2953
Andre Guedes2519a1f2011-11-07 11:45:24 -03002954 memset(&cp, 0, sizeof(cp));
2955 memcpy(&cp.lap, lap, sizeof(cp.lap));
2956 cp.length = length;
2957
2958 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2959}
Andre Guedes023d50492011-11-04 14:16:52 -03002960
2961int hci_cancel_inquiry(struct hci_dev *hdev)
2962{
2963 BT_DBG("%s", hdev->name);
2964
2965 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002966 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002967
2968 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2969}