blob: 2dc61d38bf627bad096821873faf81e2ba29bd15 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
/* NOTE(review): appears to be a millisecond delay before an auto-powered
 * device is switched back off — confirm against its use site (not visible
 * in this chunk). */
#define AUTO_OFF_TIMEOUT	2000

/* Work handlers, defined later in this file. */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Forward a device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to the HCI
 * socket layer, which relays it to interested userspace listeners. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
/* Called when a command completes: either finishes a pending synchronous
 * request (waking the waiter in __hci_request) or, during the HCI_INIT
 * phase, works around controllers that complete the wrong command. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only resend when the spurious completion was a reset
		 * that we did not actually send. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a clone of the last sent command; atomic
		 * context, so a failed clone is silently dropped. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Complete the synchronous request, if one is waiting. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
114
115static void hci_req_cancel(struct hci_dev *hdev, int err)
116{
117 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119 if (hdev->req_status == HCI_REQ_PEND) {
120 hdev->req_result = err;
121 hdev->req_status = HCI_REQ_CANCELED;
122 wake_up_interruptible(&hdev->req_wait_q);
123 }
124}
125
126/* Execute request and wait for completion. */
/* Execute request and wait for completion. */
/* Runs @req (which is expected to issue HCI commands) and sleeps until
 * hci_req_complete()/hci_req_cancel() wakes us or @timeout (in jiffies)
 * expires.  Caller must hold the request lock (see hci_request()).
 * Returns 0 on success or a negative errno. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue and mark ourselves sleepable
	 * BEFORE issuing the request, so a fast completion cannot
	 * be lost between req() and schedule_timeout(). */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* Interrupted by a signal; note req_status is left as-is here. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status byte; map to errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno from the canceller. */
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: nothing completed us in time. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
169
Gustavo Padovan6039aa732012-05-23 04:04:18 -0300170static int hci_request(struct hci_dev *hdev,
171 void (*req)(struct hci_dev *hdev, unsigned long opt),
172 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700173{
174 int ret;
175
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200176 if (!test_bit(HCI_UP, &hdev->flags))
177 return -ENETDOWN;
178
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179 /* Serialize all requests */
180 hci_req_lock(hdev);
181 ret = __hci_request(hdev, req, opt, timeout);
182 hci_req_unlock(hdev);
183
184 return ret;
185}
186
/* Request handler: issue an HCI_Reset.  HCI_RESET is set here and is
 * expected to be cleared when the reset completes (handled elsewhere). */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
195
/* Queue the BR/EDR controller initialization command sequence.  The
 * commands are fired asynchronously; their completions are processed by
 * the event handlers.  Command order follows the historical init flow
 * and should not be rearranged casually. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers do packet-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	/* 0x7d00 baseband slots (0.625 ms each) ~= 20 s */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all stored link keys on the controller side. */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
247
/* Queue the AMP (alternate MAC/PHY) controller initialization sequence. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers do block-based flow control. */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
261
/* Request handler for device bring-up: first flush any driver-supplied
 * "special" init commands into the command queue, then run the
 * type-specific (BR/EDR vs AMP) init sequence. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	/* driver_init is empty by now; purge keeps it in a clean state. */
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
295
/* Request handler for LE-specific initialization. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
303
Linus Torvalds1da177e2005-04-16 15:20:36 -0700304static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
305{
306 __u8 scan = opt;
307
308 BT_DBG("%s %x", hdev->name, scan);
309
310 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200311 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312}
313
314static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
315{
316 __u8 auth = opt;
317
318 BT_DBG("%s %x", hdev->name, auth);
319
320 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200321 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700322}
323
324static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
325{
326 __u8 encrypt = opt;
327
328 BT_DBG("%s %x", hdev->name, encrypt);
329
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200330 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200331 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700332}
333
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200334static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
335{
336 __le16 policy = cpu_to_le16(opt);
337
Marcel Holtmanna418b892008-11-30 12:17:28 +0100338 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200339
340 /* Default link policy */
341 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
342}
343
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900344/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345 * Device is held on return. */
346struct hci_dev *hci_dev_get(int index)
347{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200348 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700349
350 BT_DBG("%d", index);
351
352 if (index < 0)
353 return NULL;
354
355 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200356 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357 if (d->id == index) {
358 hdev = hci_dev_hold(d);
359 break;
360 }
361 }
362 read_unlock(&hci_dev_list_lock);
363 return hdev;
364}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700365
366/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200367
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200368bool hci_discovery_active(struct hci_dev *hdev)
369{
370 struct discovery_state *discov = &hdev->discovery;
371
Andre Guedes6fbe1952012-02-03 17:47:58 -0300372 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300373 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300374 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200375 return true;
376
Andre Guedes6fbe1952012-02-03 17:47:58 -0300377 default:
378 return false;
379 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200380}
381
Johan Hedbergff9ef572012-01-04 14:23:45 +0200382void hci_discovery_set_state(struct hci_dev *hdev, int state)
383{
384 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
385
386 if (hdev->discovery.state == state)
387 return;
388
389 switch (state) {
390 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300391 if (hdev->discovery.state != DISCOVERY_STARTING)
392 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200393 break;
394 case DISCOVERY_STARTING:
395 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300396 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200397 mgmt_discovering(hdev, 1);
398 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200399 case DISCOVERY_RESOLVING:
400 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200401 case DISCOVERY_STOPPING:
402 break;
403 }
404
405 hdev->discovery.state = state;
406}
407
Linus Torvalds1da177e2005-04-16 15:20:36 -0700408static void inquiry_cache_flush(struct hci_dev *hdev)
409{
Johan Hedberg30883512012-01-04 14:16:21 +0200410 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200411 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412
Johan Hedberg561aafb2012-01-04 13:31:59 +0200413 list_for_each_entry_safe(p, n, &cache->all, all) {
414 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200415 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200417
418 INIT_LIST_HEAD(&cache->unknown);
419 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700420}
421
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300422struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
423 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700424{
Johan Hedberg30883512012-01-04 14:16:21 +0200425 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700426 struct inquiry_entry *e;
427
428 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
429
Johan Hedberg561aafb2012-01-04 13:31:59 +0200430 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700431 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200432 return e;
433 }
434
435 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700436}
437
Johan Hedberg561aafb2012-01-04 13:31:59 +0200438struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300439 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200440{
Johan Hedberg30883512012-01-04 14:16:21 +0200441 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200442 struct inquiry_entry *e;
443
444 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
445
446 list_for_each_entry(e, &cache->unknown, list) {
447 if (!bacmp(&e->data.bdaddr, bdaddr))
448 return e;
449 }
450
451 return NULL;
452}
453
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200454struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300455 bdaddr_t *bdaddr,
456 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200457{
458 struct discovery_state *cache = &hdev->discovery;
459 struct inquiry_entry *e;
460
461 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
462
463 list_for_each_entry(e, &cache->resolve, list) {
464 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
465 return e;
466 if (!bacmp(&e->data.bdaddr, bdaddr))
467 return e;
468 }
469
470 return NULL;
471}
472
Johan Hedberga3d4e202012-01-09 00:53:02 +0200473void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300474 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200475{
476 struct discovery_state *cache = &hdev->discovery;
477 struct list_head *pos = &cache->resolve;
478 struct inquiry_entry *p;
479
480 list_del(&ie->list);
481
482 list_for_each_entry(p, &cache->resolve, list) {
483 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300484 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200485 break;
486 pos = &p->list;
487 }
488
489 list_add(&ie->list, pos);
490}
491
/* Add or refresh the cache entry for an inquiry result.
 * @name_known: caller already knows the remote name.
 * @ssp: out-parameter; set to true if either this result or the cached
 *       entry indicates SSP support (may be NULL).
 * Returns true when no name resolution is needed for this entry,
 * false when the name is still unknown (or allocation failed). */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support is sticky: once seen, keep reporting it. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while waiting to resolve the name:
		 * re-sort the entry within the resolve list. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from whichever
	 * resolution sub-list it was on. */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
547
548static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
549{
Johan Hedberg30883512012-01-04 14:16:21 +0200550 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700551 struct inquiry_info *info = (struct inquiry_info *) buf;
552 struct inquiry_entry *e;
553 int copied = 0;
554
Johan Hedberg561aafb2012-01-04 13:31:59 +0200555 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700556 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200557
558 if (copied >= num)
559 break;
560
Linus Torvalds1da177e2005-04-16 15:20:36 -0700561 bacpy(&info->bdaddr, &data->bdaddr);
562 info->pscan_rep_mode = data->pscan_rep_mode;
563 info->pscan_period_mode = data->pscan_period_mode;
564 info->pscan_mode = data->pscan_mode;
565 memcpy(info->dev_class, data->dev_class, 3);
566 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200567
Linus Torvalds1da177e2005-04-16 15:20:36 -0700568 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200569 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570 }
571
572 BT_DBG("cache %p, copied %d", cache, copied);
573 return copied;
574}
575
576static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
577{
578 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
579 struct hci_cp_inquiry cp;
580
581 BT_DBG("%s", hdev->name);
582
583 if (test_bit(HCI_INQUIRY, &hdev->flags))
584 return;
585
586 /* Start Inquiry */
587 memcpy(&cp.lap, &ir->lap, 3);
588 cp.length = ir->length;
589 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200590 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700591}
592
/* HCIINQUIRY ioctl backend: optionally run a fresh inquiry (when the
 * cache is stale/empty or a flush was requested), then copy the cached
 * results back to userspace.  Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the lock whether the cache must be refreshed. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28 s inquiry units; ~2 s of jiffies each. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the (updated) request header, then the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
657
658/* ---- HCI ioctl helpers ---- */
659
/* Bring up HCI device @dev: open the transport, run the init command
 * sequence (unless the device is raw), and announce HCI_DEV_UP.  On
 * init failure the device is torn back down.  Returns 0 or -errno
 * (-ENODEV, -ERFKILL, -EALREADY, -EIO, or an init request error). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered; refuse to bring it up. */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the underlying transport (driver callback). */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* LE init runs after (and its result replaces) the
		 * common init result. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt is told about power elsewhere. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
751
/* Power down an HCI device: cancel scheduled work, flush queues, reset the
 * controller (unless HCI_RAW or the reset-on-close quirk forbids it) and
 * call the driver's ->close() callback.  Always returns 0 and is safe to
 * call when the device is already down. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* Stop any pending LE scan work before tearing the device down */
	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Device already down: only the command timer needs stopping */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* A pending discoverable timeout is obsolete once we go down */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		/* Short 250ms timeout for the HCI_Reset request */
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Report powered-off to mgmt unless the auto-off path owns it */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Wipe cached EIR data and class of device */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
839
840int hci_dev_close(__u16 dev)
841{
842 struct hci_dev *hdev;
843 int err;
844
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200845 hdev = hci_dev_get(dev);
846 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100848
849 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
850 cancel_delayed_work(&hdev->power_off);
851
Linus Torvalds1da177e2005-04-16 15:20:36 -0700852 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100853
Linus Torvalds1da177e2005-04-16 15:20:36 -0700854 hci_dev_put(hdev);
855 return err;
856}
857
/* Reset an open HCI device: drop queued packets, flush the inquiry cache
 * and connection hash, zero the flow-control counters and (unless HCI_RAW)
 * issue an HCI_Reset request.  Returns 0, -ENODEV, or the request error. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset while the device is down */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and clear per-link packet counters */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
896
897int hci_dev_reset_stat(__u16 dev)
898{
899 struct hci_dev *hdev;
900 int ret = 0;
901
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200902 hdev = hci_dev_get(dev);
903 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700904 return -ENODEV;
905
906 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
907
908 hci_dev_put(hdev);
909
910 return ret;
911}
912
/* Handle the device-configuration ioctls (HCISET*): either run the matching
 * HCI request against the controller or update local settings directly.
 * @arg points to a userspace struct hci_dev_req.  Returns 0 or a negative
 * errno (-EFAULT, -ENODEV, -EOPNOTSUPP, -EINVAL, or a request error). */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are settable */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt carries the MTU and packet count packed in its
		 * two 16-bit halves */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
987
/* HCIGETDEVLIST ioctl helper: copy up to dev_num (dev_id, flags) pairs for
 * the registered HCI devices back to userspace.  Returns 0 or a negative
 * errno. */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the request so the allocation below stays small */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace touching the device cancels a pending
		 * automatic power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Keep PAIRABLE set while mgmt is not in control */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy out the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1034
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info snapshot for one
 * device and copy it to userspace.  Returns 0 or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touching the device cancels a pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Keep PAIRABLE set while mgmt is not in control */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type, high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1076
1077/* ---- Interface to HCI drivers ---- */
1078
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001079static int hci_rfkill_set_block(void *data, bool blocked)
1080{
1081 struct hci_dev *hdev = data;
1082
1083 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1084
1085 if (!blocked)
1086 return 0;
1087
1088 hci_dev_do_close(hdev);
1089
1090 return 0;
1091}
1092
/* rfkill integration: block requests close the device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1096
/* Deferred power-on work: open the device and, when it was powered on
 * automatically, schedule the auto power-off timeout.  On the first
 * power-on after setup the new device is announced to mgmt. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1113
1114static void hci_power_off(struct work_struct *work)
1115{
Johan Hedberg32435532011-11-07 22:16:04 +02001116 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001117 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001118
1119 BT_DBG("%s", hdev->name);
1120
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001121 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001122}
1123
/* Delayed work ending the discoverable period: re-enable page scan only
 * (dropping inquiry scan) and clear the stored timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1141
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001142int hci_uuids_clear(struct hci_dev *hdev)
1143{
1144 struct list_head *p, *n;
1145
1146 list_for_each_safe(p, n, &hdev->uuids) {
1147 struct bt_uuid *uuid;
1148
1149 uuid = list_entry(p, struct bt_uuid, list);
1150
1151 list_del(p);
1152 kfree(uuid);
1153 }
1154
1155 return 0;
1156}
1157
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001158int hci_link_keys_clear(struct hci_dev *hdev)
1159{
1160 struct list_head *p, *n;
1161
1162 list_for_each_safe(p, n, &hdev->link_keys) {
1163 struct link_key *key;
1164
1165 key = list_entry(p, struct link_key, list);
1166
1167 list_del(p);
1168 kfree(key);
1169 }
1170
1171 return 0;
1172}
1173
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001174int hci_smp_ltks_clear(struct hci_dev *hdev)
1175{
1176 struct smp_ltk *k, *tmp;
1177
1178 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1179 list_del(&k->list);
1180 kfree(k);
1181 }
1182
1183 return 0;
1184}
1185
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001186struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1187{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001188 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001189
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001190 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001191 if (bacmp(bdaddr, &k->bdaddr) == 0)
1192 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001193
1194 return NULL;
1195}
1196
/* Decide whether a freshly created link key should be stored persistently.
 * Returns true when the key may outlive the connection, false when it must
 * be flushed afterwards.  @conn may be NULL (security mode 3 case). */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1232
/* Look up a stored long term key by its encrypted diversifier and random
 * value.  Returns the matching key or NULL. */
struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1248
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001249struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001250 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001251{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001252 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001253
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001254 list_for_each_entry(k, &hdev->long_term_keys, list)
1255 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001256 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001257 return k;
1258
1259 return NULL;
1260}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001261EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001262
/* Store (or update) the link key for @bdaddr.  @new_key indicates the
 * controller reported this as a newly created key; in that case mgmt is
 * notified and non-persistent keys are marked for flushing on the
 * connection.  @conn may be NULL.  Returns 0 or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		/* Reuse the existing entry, remembering its previous type */
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff means "no previous key" to hci_persistent_key() */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the old stored type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1315
/* Store (or update) an SMP key for @bdaddr.  Only STK/LTK types are kept;
 * other types return 0 without storing.  For newly created LTKs mgmt is
 * notified.  Returns 0 or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for the same address when possible */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1352
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001353int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1354{
1355 struct link_key *key;
1356
1357 key = hci_find_link_key(hdev, bdaddr);
1358 if (!key)
1359 return -ENOENT;
1360
1361 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1362
1363 list_del(&key->list);
1364 kfree(key);
1365
1366 return 0;
1367}
1368
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001369int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1370{
1371 struct smp_ltk *k, *tmp;
1372
1373 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1374 if (bacmp(bdaddr, &k->bdaddr))
1375 continue;
1376
1377 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1378
1379 list_del(&k->list);
1380 kfree(k);
1381 }
1382
1383 return 0;
1384}
1385
/* HCI command timer function: fires when a sent command got no response in
 * time.  Logs the timeout, restores the single command credit and kicks the
 * command work so queued commands keep flowing. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1395
Szymon Janc2763eda2011-03-22 13:12:22 +01001396struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001397 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001398{
1399 struct oob_data *data;
1400
1401 list_for_each_entry(data, &hdev->remote_oob_data, list)
1402 if (bacmp(bdaddr, &data->bdaddr) == 0)
1403 return data;
1404
1405 return NULL;
1406}
1407
1408int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1409{
1410 struct oob_data *data;
1411
1412 data = hci_find_remote_oob_data(hdev, bdaddr);
1413 if (!data)
1414 return -ENOENT;
1415
1416 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1417
1418 list_del(&data->list);
1419 kfree(data);
1420
1421 return 0;
1422}
1423
1424int hci_remote_oob_data_clear(struct hci_dev *hdev)
1425{
1426 struct oob_data *data, *n;
1427
1428 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1429 list_del(&data->list);
1430 kfree(data);
1431 }
1432
1433 return 0;
1434}
1435
/* Store (or refresh) the OOB hash/randomizer pair received for @bdaddr.
 * Returns 0 or -ENOMEM. */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		/* kmalloc (not kzalloc) is fine: every field is written
		 * below before the entry is used */
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
1459
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001460struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001461{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001462 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001463
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001464 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001465 if (bacmp(bdaddr, &b->bdaddr) == 0)
1466 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001467
1468 return NULL;
1469}
1470
1471int hci_blacklist_clear(struct hci_dev *hdev)
1472{
1473 struct list_head *p, *n;
1474
1475 list_for_each_safe(p, n, &hdev->blacklist) {
1476 struct bdaddr_list *b;
1477
1478 b = list_entry(p, struct bdaddr_list, list);
1479
1480 list_del(p);
1481 kfree(b);
1482 }
1483
1484 return 0;
1485}
1486
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001487int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001488{
1489 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001490
1491 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1492 return -EBADF;
1493
Antti Julku5e762442011-08-25 16:48:02 +03001494 if (hci_blacklist_lookup(hdev, bdaddr))
1495 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001496
1497 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001498 if (!entry)
1499 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001500
1501 bacpy(&entry->bdaddr, bdaddr);
1502
1503 list_add(&entry->list, &hdev->blacklist);
1504
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001505 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001506}
1507
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001508int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001509{
1510 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001511
Szymon Janc1ec918c2011-11-16 09:32:21 +01001512 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001513 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001514
1515 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001516 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001517 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001518
1519 list_del(&entry->list);
1520 kfree(entry);
1521
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001522 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001523}
1524
/* hci_request callback: send LE Set Scan Parameters using the values
 * passed in @opt (a struct le_scan_params pointer). */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1537
/* hci_request callback: send LE Set Scan Enable with scanning turned on
 * (enable = 1, remaining fields zero). */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1547
/* Start an LE scan synchronously: set scan parameters, enable scanning and
 * schedule the delayed work that disables it again after @timeout ms.
 * Returns -EINPROGRESS when a scan is already active, a request error, or
 * 0 on success. */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* 3 second timeout for each of the two HCI requests below */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1581
/* Cancel a running LE scan.  Returns -EALREADY when no scan is active.
 * When the pending disable work could be cancelled, the disable command
 * is sent immediately instead. */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1599
/* Delayed work that turns LE scanning off once the scan timeout expires
 * (the all-zero command parameters mean enable = 0). */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1612
/* Work item running a queued LE scan with the parameters stashed in
 * hdev->le_scan_params by hci_le_scan(). */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
1623
1624int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001625 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001626{
1627 struct le_scan_params *param = &hdev->le_scan_params;
1628
1629 BT_DBG("%s", hdev->name);
1630
1631 if (work_busy(&hdev->le_scan))
1632 return -EINPROGRESS;
1633
1634 param->type = type;
1635 param->interval = interval;
1636 param->window = window;
1637 param->timeout = timeout;
1638
1639 queue_work(system_long_wq, &hdev->le_scan);
1640
1641 return 0;
1642}
1643
/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	/* Zeroed allocation: every field not set explicitly below
	 * starts out as 0/NULL */
	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline capabilities every controller supports */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	/* Default sniff mode interval bounds */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	/* RX/TX/command processing and power handling work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	/* Caller registers the device with hci_register_dev(); on
	 * failure it must be released with hci_free_dev(). */
	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1697
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver-queued init frames still pending */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1707
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head, *p;
	int id, error;

	/* A driver must provide at least open() and close() callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	write_lock(&hci_dev_list_lock);

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
	head = &hci_dev_list;

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		int nid = list_entry(p, struct hci_dev, list)->id;
		if (nid > id)
			break;
		if (nid == id)
			id++;
		head = p;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Insert after 'head' so the list stays sorted by id */
	list_add(&hdev->list, head);

	write_unlock(&hci_dev_list_lock);

	/* Per-device single-threaded workqueue driving RX/TX/command
	 * processing */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill hookup is best effort: a registration failure leaves
	 * hdev->rfkill NULL but does not fail device registration */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* New devices start in setup state; the power_on work performs
	 * the initial bring-up */
	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1784
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so concurrent users back off */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames from the driver */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only inform mgmt about the removal if setup had completed,
	 * i.e. userspace could have seen the index */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Purge all remembered remote-device state */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1836
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Broadcast a suspend notification; there is no failure path,
	 * so this always returns 0 */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1844
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Broadcast a resume notification; there is no failure path,
	 * so this always returns 0 */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1852
Marcel Holtmann76bca882009-11-18 00:40:39 +01001853/* Receive frame from HCI drivers */
1854int hci_recv_frame(struct sk_buff *skb)
1855{
1856 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1857 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001858 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001859 kfree_skb(skb);
1860 return -ENXIO;
1861 }
1862
1863 /* Incomming skb */
1864 bt_cb(skb)->incoming = 1;
1865
1866 /* Time stamp */
1867 __net_timestamp(skb);
1868
Marcel Holtmann76bca882009-11-18 00:40:39 +01001869 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001870 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001871
Marcel Holtmann76bca882009-11-18 00:40:39 +01001872 return 0;
1873}
1874EXPORT_SYMBOL(hci_recv_frame);
1875
/* Incrementally reassemble one HCI frame from a driver byte stream.
 *
 * Up to @count bytes of @data are consumed into the partial frame kept
 * in hdev->reassembly[@index]; a completed frame is handed off to
 * hci_recv_frame().  Returns the number of bytes NOT consumed (a frame
 * boundary was reached before @count was exhausted) or a negative
 * error, in which case any partial frame is discarded.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No frame in progress: allocate one sized for the
		 * largest possible frame of this packet type */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* First expect just the packet header */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy no more than the bytes still expected */
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once exactly the header has been gathered, read the
		 * payload length from it and extend the expectation */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1984
Marcel Holtmannef222012007-07-11 06:42:04 +02001985int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1986{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301987 int rem = 0;
1988
Marcel Holtmannef222012007-07-11 06:42:04 +02001989 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1990 return -EILSEQ;
1991
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001992 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001993 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301994 if (rem < 0)
1995 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001996
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301997 data += (count - rem);
1998 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001999 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002000
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302001 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002002}
2003EXPORT_SYMBOL(hci_recv_fragment);
2004
Suraj Sumangala99811512010-07-14 13:02:19 +05302005#define STREAM_REASSEMBLY 0
2006
2007int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2008{
2009 int type;
2010 int rem = 0;
2011
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002012 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302013 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2014
2015 if (!skb) {
2016 struct { char type; } *pkt;
2017
2018 /* Start of the frame */
2019 pkt = data;
2020 type = pkt->type;
2021
2022 data++;
2023 count--;
2024 } else
2025 type = bt_cb(skb)->pkt_type;
2026
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002027 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002028 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302029 if (rem < 0)
2030 return rem;
2031
2032 data += (count - rem);
2033 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002034 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302035
2036 return rem;
2037}
2038EXPORT_SYMBOL(hci_recv_stream_fragment);
2039
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040/* ---- Interface to upper protocols ---- */
2041
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042int hci_register_cb(struct hci_cb *cb)
2043{
2044 BT_DBG("%p name %s", cb, cb->name);
2045
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002046 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002047 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002048 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002049
2050 return 0;
2051}
2052EXPORT_SYMBOL(hci_register_cb);
2053
/* Remove @cb from the global list of HCI core callbacks.  Always
 * succeeds and returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2065
2066static int hci_send_frame(struct sk_buff *skb)
2067{
2068 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2069
2070 if (!hdev) {
2071 kfree_skb(skb);
2072 return -ENODEV;
2073 }
2074
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002075 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002077 /* Time stamp */
2078 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002080 /* Send copy to monitor */
2081 hci_send_to_monitor(hdev, skb);
2082
2083 if (atomic_read(&hdev->promisc)) {
2084 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002085 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002086 }
2087
2088 /* Get rid of skb owner, prior to sending to the driver. */
2089 skb_orphan(skb);
2090
2091 return hdev->send(skb);
2092}
2093
2094/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002095int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096{
2097 int len = HCI_COMMAND_HDR_SIZE + plen;
2098 struct hci_command_hdr *hdr;
2099 struct sk_buff *skb;
2100
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002101 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
2103 skb = bt_skb_alloc(len, GFP_ATOMIC);
2104 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02002105 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 return -ENOMEM;
2107 }
2108
2109 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002110 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 hdr->plen = plen;
2112
2113 if (plen)
2114 memcpy(skb_put(skb, plen), param, plen);
2115
2116 BT_DBG("skb len %d", skb->len);
2117
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002118 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002120
Johan Hedberga5040ef2011-01-10 13:28:59 +02002121 if (test_bit(HCI_INIT, &hdev->flags))
2122 hdev->init_last_cmd = opcode;
2123
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002125 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126
2127 return 0;
2128}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129
2130/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002131void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132{
2133 struct hci_command_hdr *hdr;
2134
2135 if (!hdev->sent_cmd)
2136 return NULL;
2137
2138 hdr = (void *) hdev->sent_cmd->data;
2139
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002140 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002141 return NULL;
2142
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002143 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144
2145 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2146}
2147
2148/* Send ACL data */
2149static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2150{
2151 struct hci_acl_hdr *hdr;
2152 int len = skb->len;
2153
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002154 skb_push(skb, HCI_ACL_HDR_SIZE);
2155 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002156 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002157 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2158 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159}
2160
/* Add ACL headers to @skb (and all of its frag_list fragments) and
 * append everything to @queue.  Fragments are queued atomically so the
 * TX scheduler never sees a partially queued frame.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; the fragments are
	 * handled separately through the frag_list below */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2207
2208void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2209{
2210 struct hci_conn *conn = chan->conn;
2211 struct hci_dev *hdev = conn->hdev;
2212
2213 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2214
2215 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002216
2217 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002218
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002219 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002220}
2221EXPORT_SYMBOL(hci_send_acl);
2222
2223/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002224void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002225{
2226 struct hci_dev *hdev = conn->hdev;
2227 struct hci_sco_hdr hdr;
2228
2229 BT_DBG("%s len %d", hdev->name, skb->len);
2230
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002231 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 hdr.dlen = skb->len;
2233
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002234 skb_push(skb, HCI_SCO_HDR_SIZE);
2235 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002236 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237
2238 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002239 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002240
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002242 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002243}
2244EXPORT_SYMBOL(hci_send_sco);
2245
2246/* ---- HCI TX task (outgoing data) ---- */
2247
/* HCI Connection scheduler */
/* Select the connection of link type @type that should transmit next:
 * the one with pending data and the fewest packets in flight.  On
 * return *quote holds how many packets it may send (a fair share of the
 * controller's free buffers, at least 1), or 0 when nothing is ready.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the least-served connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool matching the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the free buffers, minimum one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2308
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002309static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002310{
2311 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002312 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002313
Ville Tervobae1f5d92011-02-10 22:38:53 -03002314 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002316 rcu_read_lock();
2317
Linus Torvalds1da177e2005-04-16 15:20:36 -07002318 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002319 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002320 if (c->type == type && c->sent) {
2321 BT_ERR("%s killing stalled connection %s",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002322 hdev->name, batostr(&c->dst));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323 hci_acl_disconn(c, 0x13);
2324 }
2325 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002326
2327 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002328}
2329
/* Select the channel of link type @type that should transmit next:
 * among all channels with queued data, pick the one whose head skb has
 * the highest priority; ties are broken in favour of the connection
 * with the fewest packets in flight.  On return *quote holds the fair
 * number of packets it may send (at least 1); returns NULL when no
 * channel has data queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Lower-priority traffic never preempts the
			 * current best priority level */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* New, higher priority level: restart
				 * the fairness bookkeeping */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool matching the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the free buffers among the channels competing
	 * at the winning priority, minimum one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2408
/* Anti-starvation pass over all channels of link type @type: channels
 * that transmitted in the last round get their 'sent' counter cleared,
 * while the head skb of every channel that did not transmit is bumped
 * to priority HCI_PRIO_MAX - 1 so it wins a future hci_chan_sent()
 * selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was served recently: just reset its
			 * counter, no promotion needed */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2458
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002459static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2460{
2461 /* Calculate count of blocks used by this packet */
2462 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2463}
2464
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002465static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 if (!test_bit(HCI_RAW, &hdev->flags)) {
2468 /* ACL tx timeout must be longer than maximum
2469 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002470 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002471 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002472 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002473 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002474}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002475
/* Packet-based ACL scheduler: drain channel queues while the controller
 * advertises free ACL buffers (hdev->acl_cnt).
 *
 * hci_chan_sent() selects the best channel and a fair packet quote for
 * it; transmission of a channel stops early when a lower-priority skb
 * reaches the head of its queue. If any packet went out, priorities are
 * rebalanced via hci_prio_recalculate() for the next round.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;	/* budget before this round */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Catch a controller that stopped completing packets */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Bring the link into active mode before TX */
			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed; account it on
			 * both the channel and its connection. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2513
/* Block-based ACL scheduler, for controllers that account flow control
 * in fixed-size data blocks rather than whole packets. Mirrors
 * hci_sched_acl_pkt() but charges __get_blocks(skb) blocks per frame
 * against hdev->block_cnt and the per-channel quote.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;	/* budget before this round */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): the skb is already dequeued here;
			 * returning leaves it neither sent, requeued nor
			 * freed - looks like a drop/leak. The size check
			 * should arguably precede the dequeue; confirm
			 * against upstream history before changing. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Charge the blocks against the controller budget
			 * and this channel's quote/accounting. */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2559
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002560static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002561{
2562 BT_DBG("%s", hdev->name);
2563
2564 if (!hci_conn_num(hdev, ACL_LINK))
2565 return;
2566
2567 switch (hdev->flow_ctl_mode) {
2568 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2569 hci_sched_acl_pkt(hdev);
2570 break;
2571
2572 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2573 hci_sched_acl_blk(hdev);
2574 break;
2575 }
2576}
2577
Linus Torvalds1da177e2005-04-16 15:20:36 -07002578/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002579static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002580{
2581 struct hci_conn *conn;
2582 struct sk_buff *skb;
2583 int quote;
2584
2585 BT_DBG("%s", hdev->name);
2586
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002587 if (!hci_conn_num(hdev, SCO_LINK))
2588 return;
2589
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2591 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2592 BT_DBG("skb %p len %d", skb, skb->len);
2593 hci_send_frame(skb);
2594
2595 conn->sent++;
2596 if (conn->sent == ~0)
2597 conn->sent = 0;
2598 }
2599 }
2600}
2601
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002602static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002603{
2604 struct hci_conn *conn;
2605 struct sk_buff *skb;
2606 int quote;
2607
2608 BT_DBG("%s", hdev->name);
2609
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002610 if (!hci_conn_num(hdev, ESCO_LINK))
2611 return;
2612
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002613 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2614 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2615 BT_DBG("skb %p len %d", skb, skb->len);
2616 hci_send_frame(skb);
2617
2618 conn->sent++;
2619 if (conn->sent == ~0)
2620 conn->sent = 0;
2621 }
2622 }
2623}
2624
/* LE data scheduler. When the controller reports no dedicated LE
 * buffers (le_pkts == 0) the budget is shared with ACL (acl_cnt),
 * otherwise le_cnt is used; the leftover budget is written back to
 * whichever pool it came from at the end.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE buffers if present, otherwise share ACL's */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember the budget to detect progress below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused budget to the pool it was taken from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2675
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002676static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002678 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679 struct sk_buff *skb;
2680
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002681 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002682 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683
2684 /* Schedule queues and send stuff to HCI driver */
2685
2686 hci_sched_acl(hdev);
2687
2688 hci_sched_sco(hdev);
2689
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002690 hci_sched_esco(hdev);
2691
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002692 hci_sched_le(hdev);
2693
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694 /* Send next queued raw (unknown type) packet */
2695 while ((skb = skb_dequeue(&hdev->raw_q)))
2696 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697}
2698
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002699/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700
2701/* ACL data packet */
/* Handle an incoming ACL data packet: resolve the connection from the
 * handle and pass the payload up to L2CAP; frames for unknown handles
 * are logged and dropped. Consumes @skb on every path (L2CAP takes
 * ownership on success).
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	/* The 16-bit handle field carries both the connection handle
	 * and the packet boundary/broadcast flags. */
	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Notify mgmt of the connection; the test_and_set_bit
		 * guard makes this fire at most once per connection. */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2744
2745/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002746static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747{
2748 struct hci_sco_hdr *hdr = (void *) skb->data;
2749 struct hci_conn *conn;
2750 __u16 handle;
2751
2752 skb_pull(skb, HCI_SCO_HDR_SIZE);
2753
2754 handle = __le16_to_cpu(hdr->handle);
2755
2756 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2757
2758 hdev->stat.sco_rx++;
2759
2760 hci_dev_lock(hdev);
2761 conn = hci_conn_hash_lookup_handle(hdev, handle);
2762 hci_dev_unlock(hdev);
2763
2764 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002766 sco_recv_scodata(conn, skb);
2767 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002769 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002770 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 }
2772
2773 kfree_skb(skb);
2774}
2775
/* RX work item: drain hdev->rx_q and dispatch each frame.
 *
 * Every frame is first mirrored to the monitor socket and, in
 * promiscuous mode, to raw HCI sockets. In HCI_RAW mode the stack does
 * not process frames at all; while HCI_INIT is set, data packets are
 * dropped and only events go through. Surviving frames are demuxed by
 * packet type to the event/ACL/SCO handlers, which consume the skb.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space owns the device, drop here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2830
/* CMD work item: send one queued HCI command when the controller has
 * command credits (cmd_cnt).
 *
 * A clone of the frame is stashed in hdev->sent_cmd so the completion
 * path can match the response. The command timer is re-armed to catch
 * an unresponsive controller, except while HCI_RESET is set, where it
 * is deleted instead. If cloning fails the command is requeued and the
 * work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously kept command, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002861
2862int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2863{
2864 /* General inquiry access code (GIAC) */
2865 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2866 struct hci_cp_inquiry cp;
2867
2868 BT_DBG("%s", hdev->name);
2869
2870 if (test_bit(HCI_INQUIRY, &hdev->flags))
2871 return -EINPROGRESS;
2872
Johan Hedberg46632622012-01-02 16:06:08 +02002873 inquiry_cache_flush(hdev);
2874
Andre Guedes2519a1f2011-11-07 11:45:24 -03002875 memset(&cp, 0, sizeof(cp));
2876 memcpy(&cp.lap, lap, sizeof(cp.lap));
2877 cp.length = length;
2878
2879 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2880}
Andre Guedes023d50492011-11-04 14:16:52 -03002881
2882int hci_cancel_inquiry(struct hci_dev *hdev)
2883{
2884 BT_DBG("%s", hdev->name);
2885
2886 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002887 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002888
2889 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2890}
Andre Guedes31f79562012-04-24 21:02:53 -03002891
2892u8 bdaddr_to_le(u8 bdaddr_type)
2893{
2894 switch (bdaddr_type) {
2895 case BDADDR_LE_PUBLIC:
2896 return ADDR_LE_DEV_PUBLIC;
2897
2898 default:
2899 /* Fallback to LE Random address type */
2900 return ADDR_LE_DEV_RANDOM;
2901 }
2902}