blob: 027257d4b52a069ddfda4aad1a98d19660863dde [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
S.Çağlar Onur824530212008-02-17 23:25:57 -080028#include <linux/jiffies.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30#include <linux/kmod.h>
31
32#include <linux/types.h>
33#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/skbuff.h>
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +010041#include <linux/workqueue.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070042#include <linux/interrupt.h>
Marcel Holtmann611b30f2009-06-08 14:41:38 +020043#include <linux/rfkill.h>
Ville Tervo6bd32322011-02-16 16:32:41 +020044#include <linux/timer.h>
Vinicius Costa Gomes3a0259b2011-06-09 18:50:43 -030045#include <linux/crypto.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070046#include <net/sock.h>
47
Andrei Emeltchenko70f230202010-12-01 16:58:25 +020048#include <linux/uaccess.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070049#include <asm/unaligned.h>
50
51#include <net/bluetooth/bluetooth.h>
52#include <net/bluetooth/hci_core.h>
53
Johan Hedbergab81cbf2010-12-15 13:53:18 +020054#define AUTO_OFF_TIMEOUT 2000
55
Marcel Holtmannb78752c2010-08-08 23:06:53 -040056static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020057static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020058static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070059
Linus Torvalds1da177e2005-04-16 15:20:36 -070060/* HCI device list */
61LIST_HEAD(hci_dev_list);
62DEFINE_RWLOCK(hci_dev_list_lock);
63
64/* HCI callback list */
65LIST_HEAD(hci_cb_list);
66DEFINE_RWLOCK(hci_cb_list_lock);
67
Linus Torvalds1da177e2005-04-16 15:20:36 -070068/* ---- HCI notifications ---- */
69
/* Forward a HCI_DEV_* event (up/down/reg/unreg) to the HCI socket
 * layer so monitoring sockets hear about device state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
74
75/* ---- HCI requests ---- */
76
/* Called from the event path when a HCI command completes.
 *
 * Outside of init, a pending synchronous request (__hci_request) is
 * completed: the result is stored and the waiter is woken.  During init
 * (HCI_INIT set) a completion that does not match the last issued init
 * command is normally ignored, except for the CSR workaround below.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only fall through to the resend path when this is a
		 * spurious reset complete and the command actually sent
		 * was not itself a reset. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Requeue a copy of the last sent command at the head of
		 * the command queue and kick the command worker. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Complete a pending synchronous request, if any. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
114
/* Abort a pending synchronous request with the given (positive) errno
 * value; the waiter in __hci_request negates it before returning.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
125
/* Execute request and wait for completion.
 *
 * Runs @req (which queues one or more HCI commands), then sleeps up to
 * @timeout jiffies for hci_req_complete()/hci_req_cancel() to finish the
 * request.  Caller must hold the request lock (see hci_request()).
 *
 * Returns 0 on success, a negative errno on failure/cancel, -ETIMEDOUT
 * on timeout, -EINTR if interrupted by a signal.
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue and go INTERRUPTIBLE *before*
	 * issuing the request so a fast completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on the signal path req_status is left as
	 * HCI_REQ_PEND rather than being reset to 0 — confirm this is
	 * intentional (a later completion will then wake nobody). */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds a HCI status code; map it to errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno value. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
169
/* Serialized wrapper around __hci_request(): takes the per-device
 * request lock so only one synchronous request runs at a time.
 * Fails with -ENETDOWN if the device is not up.
 */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
			      unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
185
/* Request callback: issue a HCI Reset and mark it in-flight via the
 * HCI_RESET flag (cleared by the event handler when the reset completes).
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
194
/* Queue the initialization command sequence for a BR/EDR controller.
 * Commands are queued in order; completions arrive asynchronously.
 * Packet-based flow control is the BR/EDR default.
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset (skipped for controllers that are instead reset on close) */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms) */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all). */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
246
/* Queue the initialization command sequence for an AMP controller.
 * AMP controllers use block-based flow control.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
260
/* Request callback run during device bring-up: flush any driver-queued
 * "special" init commands to the command queue first, then queue the
 * transport-specific (BR/EDR or AMP) init sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands supplied by the driver before core init */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
294
/* Request callback for LE-specific init: query the LE buffer size. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
302
Linus Torvalds1da177e2005-04-16 15:20:36 -0700303static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
304{
305 __u8 scan = opt;
306
307 BT_DBG("%s %x", hdev->name, scan);
308
309 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200310 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700311}
312
313static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
314{
315 __u8 auth = opt;
316
317 BT_DBG("%s %x", hdev->name, auth);
318
319 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200320 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700321}
322
323static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
324{
325 __u8 encrypt = opt;
326
327 BT_DBG("%s %x", hdev->name, encrypt);
328
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200329 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200330 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700331}
332
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200333static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
334{
335 __le16 policy = cpu_to_le16(opt);
336
Marcel Holtmanna418b892008-11-30 12:17:28 +0100337 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200338
339 /* Default link policy */
340 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
341}
342
/* Get HCI device by index.
 * Device is held on return (caller must drop with hci_dev_put()).
 * Returns NULL if the index is negative or no such device exists.
 */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock; take a
	 * reference before releasing it so the device cannot go away. */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700364
365/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200366
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200367bool hci_discovery_active(struct hci_dev *hdev)
368{
369 struct discovery_state *discov = &hdev->discovery;
370
Andre Guedes6fbe1952012-02-03 17:47:58 -0300371 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300372 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300373 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200374 return true;
375
Andre Guedes6fbe1952012-02-03 17:47:58 -0300376 default:
377 return false;
378 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200379}
380
/* Move the discovery state machine to @state and emit mgmt
 * "discovering" events on the transitions user space cares about.
 * No-op if the state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so no "stopped discovering" event is sent. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
406
/* Free every entry in the inquiry cache and reinitialize the auxiliary
 * "unknown" and "resolve" lists.  Caller is expected to hold the device
 * lock (all other cache accessors are called under it).
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	/* The "all" list owns the entries; the other lists only link
	 * into them, so freeing via "all" is sufficient. */
	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
420
/* Find the inquiry-cache entry for @bdaddr in the full ("all") list.
 * Returns NULL if the address has not been seen.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
436
/* Find @bdaddr among cache entries whose remote name is still unknown
 * (the "unknown" sub-list).  Returns NULL if not present.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
452
/* Find an entry on the name-resolve list.  With BDADDR_ANY, return the
 * first entry whose name_state equals @state; otherwise match @bdaddr
 * exactly.  Returns NULL if nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
471
/* Re-insert @ie into the resolve list keeping it sorted by signal
 * strength (smaller |RSSI| i.e. stronger signal first), so the
 * strongest devices get their names resolved first.  Entries already
 * NAME_PENDING stay ahead of the insertion point.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last position whose entry should stay before ie. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
490
/* Insert or refresh an inquiry-cache entry for @data.
 *
 * @name_known: caller already knows the remote name (e.g. extended
 *              inquiry result carried it).
 * @ssp:        out-parameter, set true if the device supports SSP
 *              (from this result or a previous one); may be NULL.
 *
 * Returns true if the entry's name is (now) known — i.e. no remote
 * name request is needed — false otherwise.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Remember SSP support seen in any earlier result. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed while waiting for name resolution:
		 * re-sort the resolve list accordingly. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop the entry from the
	 * unknown/resolve sub-list it was linked on. */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
546
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info.  Returns the number of entries written.
 * Must not sleep (see hci_inquiry(): called under the device lock
 * into a pre-allocated buffer).
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
574
/* Request callback: start an inquiry using the parameters in the
 * struct hci_inquiry_req passed through @opt.  Silently does nothing
 * if an inquiry is already running (HCI_INQUIRY set).
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
591
/* HCIINQUIRY ioctl handler: run an inquiry (unless fresh cached results
 * can be reused) and copy the request struct plus the cached results
 * back to user space.  Returns 0 or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only run a new inquiry if the cache is stale, empty, or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units per the HCI spec; 2000ms per unit
	 * gives the request comfortable headroom over the inquiry. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header followed by the results. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
656
657/* ---- HCI ioctl helpers ---- */
658
/* HCIDEVUP: power on HCI device @dev.
 *
 * Opens the transport, runs the HCI init sequence (unless the device is
 * a raw device), and on success marks the device HCI_UP and notifies
 * listeners; on init failure the transport is torn back down.
 * Returns 0 or a negative errno (-ENODEV, -ERFKILL, -EALREADY, -EIO, …).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered: refuse to bring it up. */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the underlying transport (driver callback). */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): a failed core init result is overwritten
		 * here by the LE init result — confirm this is intended. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP the mgmt layer announces power state
		 * itself once setup finishes. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup: quiesce workers, drop queued
		 * traffic and close the transport again. */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
750
751static int hci_dev_do_close(struct hci_dev *hdev)
752{
753 BT_DBG("%s %p", hdev->name, hdev);
754
Andre Guedes28b75a82012-02-03 17:48:00 -0300755 cancel_work_sync(&hdev->le_scan);
756
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757 hci_req_cancel(hdev, ENODEV);
758 hci_req_lock(hdev);
759
760 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300761 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700762 hci_req_unlock(hdev);
763 return 0;
764 }
765
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200766 /* Flush RX and TX works */
767 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400768 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700769
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200770 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200771 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200772 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +0200773 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200774 }
775
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200776 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200777 cancel_delayed_work(&hdev->service_cache);
778
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300779 cancel_delayed_work_sync(&hdev->le_scan_disable);
780
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300781 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700782 inquiry_cache_flush(hdev);
783 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300784 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785
786 hci_notify(hdev, HCI_DEV_DOWN);
787
788 if (hdev->flush)
789 hdev->flush(hdev);
790
791 /* Reset device */
792 skb_queue_purge(&hdev->cmd_q);
793 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200794 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +0200795 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700796 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200797 __hci_request(hdev, hci_reset_req, 0,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300798 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700799 clear_bit(HCI_INIT, &hdev->flags);
800 }
801
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200802 /* flush cmd work */
803 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804
805 /* Drop queues */
806 skb_queue_purge(&hdev->rx_q);
807 skb_queue_purge(&hdev->cmd_q);
808 skb_queue_purge(&hdev->raw_q);
809
810 /* Drop last sent command */
811 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300812 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 kfree_skb(hdev->sent_cmd);
814 hdev->sent_cmd = NULL;
815 }
816
817 /* After this point our queues are empty
818 * and no tasks are scheduled. */
819 hdev->close(hdev);
820
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100821 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
822 hci_dev_lock(hdev);
823 mgmt_powered(hdev, 0);
824 hci_dev_unlock(hdev);
825 }
Johan Hedberg5add6af2010-12-16 10:00:37 +0200826
Linus Torvalds1da177e2005-04-16 15:20:36 -0700827 /* Clear flags */
828 hdev->flags = 0;
829
Johan Hedberge59fda82012-02-22 18:11:53 +0200830 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +0200831 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +0200832
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 hci_req_unlock(hdev);
834
835 hci_dev_put(hdev);
836 return 0;
837}
838
839int hci_dev_close(__u16 dev)
840{
841 struct hci_dev *hdev;
842 int err;
843
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200844 hdev = hci_dev_get(dev);
845 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700846 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100847
848 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
849 cancel_delayed_work(&hdev->power_off);
850
Linus Torvalds1da177e2005-04-16 15:20:36 -0700851 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100852
Linus Torvalds1da177e2005-04-16 15:20:36 -0700853 hci_dev_put(hdev);
854 return err;
855}
856
857int hci_dev_reset(__u16 dev)
858{
859 struct hci_dev *hdev;
860 int ret = 0;
861
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200862 hdev = hci_dev_get(dev);
863 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864 return -ENODEV;
865
866 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867
868 if (!test_bit(HCI_UP, &hdev->flags))
869 goto done;
870
871 /* Drop queues */
872 skb_queue_purge(&hdev->rx_q);
873 skb_queue_purge(&hdev->cmd_q);
874
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300875 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700876 inquiry_cache_flush(hdev);
877 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300878 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879
880 if (hdev->flush)
881 hdev->flush(hdev);
882
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900883 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300884 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885
886 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200887 ret = __hci_request(hdev, hci_reset_req, 0,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300888 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889
890done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 hci_req_unlock(hdev);
892 hci_dev_put(hdev);
893 return ret;
894}
895
896int hci_dev_reset_stat(__u16 dev)
897{
898 struct hci_dev *hdev;
899 int ret = 0;
900
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200901 hdev = hci_dev_get(dev);
902 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903 return -ENODEV;
904
905 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
906
907 hci_dev_put(hdev);
908
909 return ret;
910}
911
/* Dispatcher for device-configuration ioctls (HCISET*). Copies a
 * struct hci_dev_req from userspace and applies the requested setting
 * to the device identified by dr.dev_id.
 *
 * Returns 0 on success, -EFAULT on bad user pointer, -ENODEV for an
 * unknown device, -EOPNOTSUPP/-EINVAL as noted below, or the error
 * from the underlying HCI request.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: the second half-word
		 * is the MTU, the first the packet count (host layout) */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same two-half-word packing as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
986
/* ioctl helper: copy the list of registered HCI devices (id + flags)
 * to userspace. The first __u16 at @arg is the caller-supplied
 * capacity in entries; on return dl->dev_num holds the actual count.
 *
 * Returns 0 on success, -EFAULT on user-copy failure, -EINVAL for a
 * zero or oversized capacity, -ENOMEM on allocation failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kzalloc below stays bounded */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace touched the device: abort pending auto-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace implies pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	/* Only copy back the entries actually filled in */
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1033
/* ioctl helper: fill a struct hci_dev_info snapshot for the device
 * named by di.dev_id and copy it back to userspace.
 *
 * Returns 0 on success, -EFAULT on user-copy failure, -ENODEV for an
 * unknown device id.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace touched the device: abort pending auto-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace implies pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1075
1076/* ---- Interface to HCI drivers ---- */
1077
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001078static int hci_rfkill_set_block(void *data, bool blocked)
1079{
1080 struct hci_dev *hdev = data;
1081
1082 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1083
1084 if (!blocked)
1085 return 0;
1086
1087 hci_dev_do_close(hdev);
1088
1089 return 0;
1090}
1091
/* rfkill operations: only the block transition needs handling */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1095
/* Deferred power-on work: opens the device and, when successful,
 * arms the auto-off timer (if requested) and completes mgmt setup. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	/* Nothing more to do if the device failed to come up */
	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-off requested: schedule the delayed power-down */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First power-on after registration: announce the index to mgmt */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1112
/* Delayed auto power-off work: simply closes the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1122
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001123static void hci_discov_off(struct work_struct *work)
1124{
1125 struct hci_dev *hdev;
1126 u8 scan = SCAN_PAGE;
1127
1128 hdev = container_of(work, struct hci_dev, discov_off.work);
1129
1130 BT_DBG("%s", hdev->name);
1131
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001132 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001133
1134 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1135
1136 hdev->discov_timeout = 0;
1137
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001138 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001139}
1140
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001141int hci_uuids_clear(struct hci_dev *hdev)
1142{
1143 struct list_head *p, *n;
1144
1145 list_for_each_safe(p, n, &hdev->uuids) {
1146 struct bt_uuid *uuid;
1147
1148 uuid = list_entry(p, struct bt_uuid, list);
1149
1150 list_del(p);
1151 kfree(uuid);
1152 }
1153
1154 return 0;
1155}
1156
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001157int hci_link_keys_clear(struct hci_dev *hdev)
1158{
1159 struct list_head *p, *n;
1160
1161 list_for_each_safe(p, n, &hdev->link_keys) {
1162 struct link_key *key;
1163
1164 key = list_entry(p, struct link_key, list);
1165
1166 list_del(p);
1167 kfree(key);
1168 }
1169
1170 return 0;
1171}
1172
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001173int hci_smp_ltks_clear(struct hci_dev *hdev)
1174{
1175 struct smp_ltk *k, *tmp;
1176
1177 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1178 list_del(&k->list);
1179 kfree(k);
1180 }
1181
1182 return 0;
1183}
1184
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001185struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1186{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001187 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001188
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001189 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001190 if (bacmp(bdaddr, &k->bdaddr) == 0)
1191 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001192
1193 return NULL;
1194}
1195
/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and the bonding requirements
 * negotiated on @conn (may be NULL for security mode 3). The guard
 * order below encodes the decision policy and must be preserved. */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1231
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001232struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001233{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001234 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001235
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001236 list_for_each_entry(k, &hdev->long_term_keys, list) {
1237 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001238 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001239 continue;
1240
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001241 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001242 }
1243
1244 return NULL;
1245}
1246EXPORT_SYMBOL(hci_find_ltk);
1247
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001248struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001249 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001250{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001251 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001252
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001253 list_for_each_entry(k, &hdev->long_term_keys, list)
1254 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001255 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001256 return k;
1257
1258 return NULL;
1259}
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001260EXPORT_SYMBOL(hci_find_ltk_by_addr);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001261
/* Store (or update) the BR/EDR link key for @bdaddr. @new_key is
 * non-zero when the key came fresh from the controller, in which case
 * mgmt is notified and the key's persistence is decided.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address when present */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed-combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1314
/* Store (or update) an SMP key (STK or LTK) for @bdaddr/@addr_type.
 * Key types other than STK/LTK are silently ignored. When @new_key is
 * set and the key is an LTK, mgmt is notified.
 *
 * Returns 0 on success (or ignored type), -ENOMEM on allocation
 * failure.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	/* Only short-term and long-term keys are stored here */
	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address when present */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long-term keys are reported to the management interface */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1351
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001352int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1353{
1354 struct link_key *key;
1355
1356 key = hci_find_link_key(hdev, bdaddr);
1357 if (!key)
1358 return -ENOENT;
1359
1360 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1361
1362 list_del(&key->list);
1363 kfree(key);
1364
1365 return 0;
1366}
1367
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001368int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1369{
1370 struct smp_ltk *k, *tmp;
1371
1372 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1373 if (bacmp(bdaddr, &k->bdaddr))
1374 continue;
1375
1376 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1377
1378 list_del(&k->list);
1379 kfree(k);
1380 }
1381
1382 return 0;
1383}
1384
/* HCI command timer function: fires when the controller failed to
 * answer the last command in time. Restores the command credit and
 * re-kicks the command work so the queue does not stall forever. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1394
Szymon Janc2763eda2011-03-22 13:12:22 +01001395struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001396 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001397{
1398 struct oob_data *data;
1399
1400 list_for_each_entry(data, &hdev->remote_oob_data, list)
1401 if (bacmp(bdaddr, &data->bdaddr) == 0)
1402 return data;
1403
1404 return NULL;
1405}
1406
1407int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1408{
1409 struct oob_data *data;
1410
1411 data = hci_find_remote_oob_data(hdev, bdaddr);
1412 if (!data)
1413 return -ENOENT;
1414
1415 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1416
1417 list_del(&data->list);
1418 kfree(data);
1419
1420 return 0;
1421}
1422
1423int hci_remote_oob_data_clear(struct hci_dev *hdev)
1424{
1425 struct oob_data *data, *n;
1426
1427 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1428 list_del(&data->list);
1429 kfree(data);
1430 }
1431
1432 return 0;
1433}
1434
1435int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001436 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001437{
1438 struct oob_data *data;
1439
1440 data = hci_find_remote_oob_data(hdev, bdaddr);
1441
1442 if (!data) {
1443 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1444 if (!data)
1445 return -ENOMEM;
1446
1447 bacpy(&data->bdaddr, bdaddr);
1448 list_add(&data->list, &hdev->remote_oob_data);
1449 }
1450
1451 memcpy(data->hash, hash, sizeof(data->hash));
1452 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1453
1454 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1455
1456 return 0;
1457}
1458
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001459struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001460{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001461 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001462
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001463 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001464 if (bacmp(bdaddr, &b->bdaddr) == 0)
1465 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001466
1467 return NULL;
1468}
1469
1470int hci_blacklist_clear(struct hci_dev *hdev)
1471{
1472 struct list_head *p, *n;
1473
1474 list_for_each_safe(p, n, &hdev->blacklist) {
1475 struct bdaddr_list *b;
1476
1477 b = list_entry(p, struct bdaddr_list, list);
1478
1479 list_del(p);
1480 kfree(b);
1481 }
1482
1483 return 0;
1484}
1485
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001486int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001487{
1488 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001489
1490 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1491 return -EBADF;
1492
Antti Julku5e762442011-08-25 16:48:02 +03001493 if (hci_blacklist_lookup(hdev, bdaddr))
1494 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001495
1496 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001497 if (!entry)
1498 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001499
1500 bacpy(&entry->bdaddr, bdaddr);
1501
1502 list_add(&entry->list, &hdev->blacklist);
1503
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001504 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001505}
1506
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001507int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001508{
1509 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001510
Szymon Janc1ec918c2011-11-16 09:32:21 +01001511 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001512 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001513
1514 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001515 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001516 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001517
1518 list_del(&entry->list);
1519 kfree(entry);
1520
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001521 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001522}
1523
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001524static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1525{
1526 struct le_scan_params *param = (struct le_scan_params *) opt;
1527 struct hci_cp_le_set_scan_param cp;
1528
1529 memset(&cp, 0, sizeof(cp));
1530 cp.type = param->type;
1531 cp.interval = cpu_to_le16(param->interval);
1532 cp.window = cpu_to_le16(param->window);
1533
1534 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1535}
1536
/* Request callback: send LE Set Scan Enable with enable = 1.
 * @opt is unused. */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1546
/* Synchronously start an LE scan: set parameters, enable scanning,
 * then schedule the delayed disable after @timeout ms. Called from
 * le_scan_work (process context); serialized by the req lock.
 *
 * Returns 0 on success, -EINPROGRESS when a scan is already running,
 * or the error from the underlying requests.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* Per-command completion timeout (3 s) for the two requests below */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* param lives on the stack; both requests complete before return */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arm the automatic scan stop after the requested duration */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1580
Andre Guedes7dbfac12012-03-15 16:52:07 -03001581int hci_cancel_le_scan(struct hci_dev *hdev)
1582{
1583 BT_DBG("%s", hdev->name);
1584
1585 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1586 return -EALREADY;
1587
1588 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1589 struct hci_cp_le_set_scan_enable cp;
1590
1591 /* Send HCI command to disable LE Scan */
1592 memset(&cp, 0, sizeof(cp));
1593 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1594 }
1595
1596 return 0;
1597}
1598
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001599static void le_scan_disable_work(struct work_struct *work)
1600{
1601 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001602 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001603 struct hci_cp_le_set_scan_enable cp;
1604
1605 BT_DBG("%s", hdev->name);
1606
1607 memset(&cp, 0, sizeof(cp));
1608
1609 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1610}
1611
/* Work trampoline: run the LE scan with the parameters stashed in
 * hdev->le_scan_params by hci_le_scan(). */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
1622
1623int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001624 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001625{
1626 struct le_scan_params *param = &hdev->le_scan_params;
1627
1628 BT_DBG("%s", hdev->name);
1629
1630 if (work_busy(&hdev->le_scan))
1631 return -EINPROGRESS;
1632
1633 param->type = type;
1634 param->interval = interval;
1635 param->window = window;
1636 param->timeout = timeout;
1637
1638 queue_work(system_long_wq, &hdev->le_scan);
1639
1640 return 0;
1641}
1642
David Herrmann9be0dab2012-04-22 14:39:57 +02001643/* Alloc HCI device */
1644struct hci_dev *hci_alloc_dev(void)
1645{
1646 struct hci_dev *hdev;
1647
1648 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1649 if (!hdev)
1650 return NULL;
1651
David Herrmannb1b813d2012-04-22 14:39:58 +02001652 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1653 hdev->esco_type = (ESCO_HV1);
1654 hdev->link_mode = (HCI_LM_ACCEPT);
1655 hdev->io_capability = 0x03; /* No Input No Output */
1656
David Herrmannb1b813d2012-04-22 14:39:58 +02001657 hdev->sniff_max_interval = 800;
1658 hdev->sniff_min_interval = 80;
1659
1660 mutex_init(&hdev->lock);
1661 mutex_init(&hdev->req_lock);
1662
1663 INIT_LIST_HEAD(&hdev->mgmt_pending);
1664 INIT_LIST_HEAD(&hdev->blacklist);
1665 INIT_LIST_HEAD(&hdev->uuids);
1666 INIT_LIST_HEAD(&hdev->link_keys);
1667 INIT_LIST_HEAD(&hdev->long_term_keys);
1668 INIT_LIST_HEAD(&hdev->remote_oob_data);
David Herrmannb1b813d2012-04-22 14:39:58 +02001669
1670 INIT_WORK(&hdev->rx_work, hci_rx_work);
1671 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1672 INIT_WORK(&hdev->tx_work, hci_tx_work);
1673 INIT_WORK(&hdev->power_on, hci_power_on);
1674 INIT_WORK(&hdev->le_scan, le_scan_work);
1675
David Herrmannb1b813d2012-04-22 14:39:58 +02001676 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1677 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1678 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1679
David Herrmann9be0dab2012-04-22 14:39:57 +02001680 skb_queue_head_init(&hdev->driver_init);
David Herrmannb1b813d2012-04-22 14:39:58 +02001681 skb_queue_head_init(&hdev->rx_q);
1682 skb_queue_head_init(&hdev->cmd_q);
1683 skb_queue_head_init(&hdev->raw_q);
1684
1685 init_waitqueue_head(&hdev->req_wait_q);
1686
1687 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1688
David Herrmannb1b813d2012-04-22 14:39:58 +02001689 hci_init_sysfs(hdev);
1690 discovery_init(hdev);
1691 hci_conn_hash_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02001692
1693 return hdev;
1694}
1695EXPORT_SYMBOL(hci_alloc_dev);
1696
1697/* Free HCI device */
1698void hci_free_dev(struct hci_dev *hdev)
1699{
1700 skb_queue_purge(&hdev->driver_init);
1701
1702 /* will free via device release */
1703 put_device(&hdev->dev);
1704}
1705EXPORT_SYMBOL(hci_free_dev);
1706
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707/* Register HCI device */
1708int hci_register_dev(struct hci_dev *hdev)
1709{
Ulisses Furquimfc507442012-04-18 12:13:04 -03001710 struct list_head *head, *p;
David Herrmannb1b813d2012-04-22 14:39:58 +02001711 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712
David Herrmann010666a2012-01-07 15:47:07 +01001713 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 return -EINVAL;
1715
Ulisses Furquimfc507442012-04-18 12:13:04 -03001716 write_lock(&hci_dev_list_lock);
1717
Mat Martineau08add512011-11-02 16:18:36 -07001718 /* Do not allow HCI_AMP devices to register at index 0,
1719 * so the index can be used as the AMP controller ID.
1720 */
1721 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
Ulisses Furquimfc507442012-04-18 12:13:04 -03001722 head = &hci_dev_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
1724 /* Find first available device id */
1725 list_for_each(p, &hci_dev_list) {
Ulisses Furquimfc507442012-04-18 12:13:04 -03001726 int nid = list_entry(p, struct hci_dev, list)->id;
1727 if (nid > id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 break;
Ulisses Furquimfc507442012-04-18 12:13:04 -03001729 if (nid == id)
1730 id++;
1731 head = p;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001733
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734 sprintf(hdev->name, "hci%d", id);
1735 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03001736
1737 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1738
Ulisses Furquimfc507442012-04-18 12:13:04 -03001739 list_add(&hdev->list, head);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001741 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001743 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001744 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001745 if (!hdev->workqueue) {
1746 error = -ENOMEM;
1747 goto err;
1748 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001749
David Herrmann33ca9542011-10-08 14:58:49 +02001750 error = hci_add_sysfs(hdev);
1751 if (error < 0)
1752 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001753
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001754 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001755 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1756 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001757 if (hdev->rfkill) {
1758 if (rfkill_register(hdev->rfkill) < 0) {
1759 rfkill_destroy(hdev->rfkill);
1760 hdev->rfkill = NULL;
1761 }
1762 }
1763
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001764 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1765 set_bit(HCI_SETUP, &hdev->dev_flags);
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001766 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001767
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01001769 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770
1771 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001772
David Herrmann33ca9542011-10-08 14:58:49 +02001773err_wqueue:
1774 destroy_workqueue(hdev->workqueue);
1775err:
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001776 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001777 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001778 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001779
David Herrmann33ca9542011-10-08 14:58:49 +02001780 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781}
1782EXPORT_SYMBOL(hci_register_dev);
1783
1784/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02001785void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001786{
Marcel Holtmannef222012007-07-11 06:42:04 +02001787 int i;
1788
Marcel Holtmannc13854c2010-02-08 15:27:07 +01001789 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Johan Hovold94324962012-03-15 14:48:41 +01001791 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1792
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001793 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001795 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001796
1797 hci_dev_do_close(hdev);
1798
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301799 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001800 kfree_skb(hdev->reassembly[i]);
1801
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001802 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001803 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001804 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001805 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001806 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001807 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001808
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001809 /* mgmt_index_removed should take care of emptying the
1810 * pending list */
1811 BUG_ON(!list_empty(&hdev->mgmt_pending));
1812
Linus Torvalds1da177e2005-04-16 15:20:36 -07001813 hci_notify(hdev, HCI_DEV_UNREG);
1814
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001815 if (hdev->rfkill) {
1816 rfkill_unregister(hdev->rfkill);
1817 rfkill_destroy(hdev->rfkill);
1818 }
1819
David Herrmannce242972011-10-08 14:58:48 +02001820 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08001821
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001822 destroy_workqueue(hdev->workqueue);
1823
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001824 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02001825 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001826 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001827 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001828 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01001829 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001830 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02001831
David Herrmanndc946bd2012-01-07 15:47:24 +01001832 hci_dev_put(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833}
1834EXPORT_SYMBOL(hci_unregister_dev);
1835
1836/* Suspend HCI device */
1837int hci_suspend_dev(struct hci_dev *hdev)
1838{
1839 hci_notify(hdev, HCI_DEV_SUSPEND);
1840 return 0;
1841}
1842EXPORT_SYMBOL(hci_suspend_dev);
1843
1844/* Resume HCI device */
1845int hci_resume_dev(struct hci_dev *hdev)
1846{
1847 hci_notify(hdev, HCI_DEV_RESUME);
1848 return 0;
1849}
1850EXPORT_SYMBOL(hci_resume_dev);
1851
Marcel Holtmann76bca882009-11-18 00:40:39 +01001852/* Receive frame from HCI drivers */
1853int hci_recv_frame(struct sk_buff *skb)
1854{
1855 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1856 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001857 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001858 kfree_skb(skb);
1859 return -ENXIO;
1860 }
1861
1862 /* Incomming skb */
1863 bt_cb(skb)->incoming = 1;
1864
1865 /* Time stamp */
1866 __net_timestamp(skb);
1867
Marcel Holtmann76bca882009-11-18 00:40:39 +01001868 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001869 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001870
Marcel Holtmann76bca882009-11-18 00:40:39 +01001871 return 0;
1872}
1873EXPORT_SYMBOL(hci_recv_frame);
1874
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301875static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001876 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301877{
1878 int len = 0;
1879 int hlen = 0;
1880 int remain = count;
1881 struct sk_buff *skb;
1882 struct bt_skb_cb *scb;
1883
1884 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001885 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301886 return -EILSEQ;
1887
1888 skb = hdev->reassembly[index];
1889
1890 if (!skb) {
1891 switch (type) {
1892 case HCI_ACLDATA_PKT:
1893 len = HCI_MAX_FRAME_SIZE;
1894 hlen = HCI_ACL_HDR_SIZE;
1895 break;
1896 case HCI_EVENT_PKT:
1897 len = HCI_MAX_EVENT_SIZE;
1898 hlen = HCI_EVENT_HDR_SIZE;
1899 break;
1900 case HCI_SCODATA_PKT:
1901 len = HCI_MAX_SCO_SIZE;
1902 hlen = HCI_SCO_HDR_SIZE;
1903 break;
1904 }
1905
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001906 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301907 if (!skb)
1908 return -ENOMEM;
1909
1910 scb = (void *) skb->cb;
1911 scb->expect = hlen;
1912 scb->pkt_type = type;
1913
1914 skb->dev = (void *) hdev;
1915 hdev->reassembly[index] = skb;
1916 }
1917
1918 while (count) {
1919 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03001920 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301921
1922 memcpy(skb_put(skb, len), data, len);
1923
1924 count -= len;
1925 data += len;
1926 scb->expect -= len;
1927 remain = count;
1928
1929 switch (type) {
1930 case HCI_EVENT_PKT:
1931 if (skb->len == HCI_EVENT_HDR_SIZE) {
1932 struct hci_event_hdr *h = hci_event_hdr(skb);
1933 scb->expect = h->plen;
1934
1935 if (skb_tailroom(skb) < scb->expect) {
1936 kfree_skb(skb);
1937 hdev->reassembly[index] = NULL;
1938 return -ENOMEM;
1939 }
1940 }
1941 break;
1942
1943 case HCI_ACLDATA_PKT:
1944 if (skb->len == HCI_ACL_HDR_SIZE) {
1945 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1946 scb->expect = __le16_to_cpu(h->dlen);
1947
1948 if (skb_tailroom(skb) < scb->expect) {
1949 kfree_skb(skb);
1950 hdev->reassembly[index] = NULL;
1951 return -ENOMEM;
1952 }
1953 }
1954 break;
1955
1956 case HCI_SCODATA_PKT:
1957 if (skb->len == HCI_SCO_HDR_SIZE) {
1958 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1959 scb->expect = h->dlen;
1960
1961 if (skb_tailroom(skb) < scb->expect) {
1962 kfree_skb(skb);
1963 hdev->reassembly[index] = NULL;
1964 return -ENOMEM;
1965 }
1966 }
1967 break;
1968 }
1969
1970 if (scb->expect == 0) {
1971 /* Complete frame */
1972
1973 bt_cb(skb)->pkt_type = type;
1974 hci_recv_frame(skb);
1975
1976 hdev->reassembly[index] = NULL;
1977 return remain;
1978 }
1979 }
1980
1981 return remain;
1982}
1983
Marcel Holtmannef222012007-07-11 06:42:04 +02001984int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1985{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301986 int rem = 0;
1987
Marcel Holtmannef222012007-07-11 06:42:04 +02001988 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1989 return -EILSEQ;
1990
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001991 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001992 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301993 if (rem < 0)
1994 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001995
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301996 data += (count - rem);
1997 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001998 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001999
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302000 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002001}
2002EXPORT_SYMBOL(hci_recv_fragment);
2003
Suraj Sumangala99811512010-07-14 13:02:19 +05302004#define STREAM_REASSEMBLY 0
2005
2006int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2007{
2008 int type;
2009 int rem = 0;
2010
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002011 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302012 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2013
2014 if (!skb) {
2015 struct { char type; } *pkt;
2016
2017 /* Start of the frame */
2018 pkt = data;
2019 type = pkt->type;
2020
2021 data++;
2022 count--;
2023 } else
2024 type = bt_cb(skb)->pkt_type;
2025
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002026 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002027 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302028 if (rem < 0)
2029 return rem;
2030
2031 data += (count - rem);
2032 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002033 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302034
2035 return rem;
2036}
2037EXPORT_SYMBOL(hci_recv_stream_fragment);
2038
Linus Torvalds1da177e2005-04-16 15:20:36 -07002039/* ---- Interface to upper protocols ---- */
2040
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041int hci_register_cb(struct hci_cb *cb)
2042{
2043 BT_DBG("%p name %s", cb, cb->name);
2044
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002045 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002047 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002048
2049 return 0;
2050}
2051EXPORT_SYMBOL(hci_register_cb);
2052
2053int hci_unregister_cb(struct hci_cb *cb)
2054{
2055 BT_DBG("%p name %s", cb, cb->name);
2056
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002057 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002058 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002059 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002060
2061 return 0;
2062}
2063EXPORT_SYMBOL(hci_unregister_cb);
2064
2065static int hci_send_frame(struct sk_buff *skb)
2066{
2067 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2068
2069 if (!hdev) {
2070 kfree_skb(skb);
2071 return -ENODEV;
2072 }
2073
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002074 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002075
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002076 /* Time stamp */
2077 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002078
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002079 /* Send copy to monitor */
2080 hci_send_to_monitor(hdev, skb);
2081
2082 if (atomic_read(&hdev->promisc)) {
2083 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002084 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002085 }
2086
2087 /* Get rid of skb owner, prior to sending to the driver. */
2088 skb_orphan(skb);
2089
2090 return hdev->send(skb);
2091}
2092
2093/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002094int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002095{
2096 int len = HCI_COMMAND_HDR_SIZE + plen;
2097 struct hci_command_hdr *hdr;
2098 struct sk_buff *skb;
2099
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002100 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101
2102 skb = bt_skb_alloc(len, GFP_ATOMIC);
2103 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02002104 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002105 return -ENOMEM;
2106 }
2107
2108 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002109 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 hdr->plen = plen;
2111
2112 if (plen)
2113 memcpy(skb_put(skb, plen), param, plen);
2114
2115 BT_DBG("skb len %d", skb->len);
2116
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002117 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002119
Johan Hedberga5040ef2011-01-10 13:28:59 +02002120 if (test_bit(HCI_INIT, &hdev->flags))
2121 hdev->init_last_cmd = opcode;
2122
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002124 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
2126 return 0;
2127}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128
2129/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002130void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131{
2132 struct hci_command_hdr *hdr;
2133
2134 if (!hdev->sent_cmd)
2135 return NULL;
2136
2137 hdr = (void *) hdev->sent_cmd->data;
2138
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002139 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140 return NULL;
2141
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002142 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002143
2144 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2145}
2146
2147/* Send ACL data */
2148static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2149{
2150 struct hci_acl_hdr *hdr;
2151 int len = skb->len;
2152
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002153 skb_push(skb, HCI_ACL_HDR_SIZE);
2154 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002155 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002156 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2157 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158}
2159
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002160static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002161 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162{
2163 struct hci_dev *hdev = conn->hdev;
2164 struct sk_buff *list;
2165
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002166 skb->len = skb_headlen(skb);
2167 skb->data_len = 0;
2168
2169 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2170 hci_add_acl_hdr(skb, conn->handle, flags);
2171
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002172 list = skb_shinfo(skb)->frag_list;
2173 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 /* Non fragmented */
2175 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2176
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002177 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002178 } else {
2179 /* Fragmented */
2180 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2181
2182 skb_shinfo(skb)->frag_list = NULL;
2183
2184 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002185 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002187 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002188
2189 flags &= ~ACL_START;
2190 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002191 do {
2192 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002193
Linus Torvalds1da177e2005-04-16 15:20:36 -07002194 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002195 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002196 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197
2198 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2199
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002200 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002201 } while (list);
2202
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002203 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002204 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002205}
2206
2207void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2208{
2209 struct hci_conn *conn = chan->conn;
2210 struct hci_dev *hdev = conn->hdev;
2211
2212 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2213
2214 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002215
2216 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002218 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219}
2220EXPORT_SYMBOL(hci_send_acl);
2221
2222/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002223void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224{
2225 struct hci_dev *hdev = conn->hdev;
2226 struct hci_sco_hdr hdr;
2227
2228 BT_DBG("%s len %d", hdev->name, skb->len);
2229
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002230 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 hdr.dlen = skb->len;
2232
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002233 skb_push(skb, HCI_SCO_HDR_SIZE);
2234 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002235 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236
2237 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002238 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002239
Linus Torvalds1da177e2005-04-16 15:20:36 -07002240 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002241 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242}
2243EXPORT_SYMBOL(hci_send_sco);
2244
2245/* ---- HCI TX task (outgoing data) ---- */
2246
2247/* HCI Connection scheduler */
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002248static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2249 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250{
2251 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002252 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002253 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002255 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002257
2258 rcu_read_lock();
2259
2260 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002261 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002263
2264 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2265 continue;
2266
Linus Torvalds1da177e2005-04-16 15:20:36 -07002267 num++;
2268
2269 if (c->sent < min) {
2270 min = c->sent;
2271 conn = c;
2272 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002273
2274 if (hci_conn_num(hdev, type) == num)
2275 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002276 }
2277
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002278 rcu_read_unlock();
2279
Linus Torvalds1da177e2005-04-16 15:20:36 -07002280 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002281 int cnt, q;
2282
2283 switch (conn->type) {
2284 case ACL_LINK:
2285 cnt = hdev->acl_cnt;
2286 break;
2287 case SCO_LINK:
2288 case ESCO_LINK:
2289 cnt = hdev->sco_cnt;
2290 break;
2291 case LE_LINK:
2292 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2293 break;
2294 default:
2295 cnt = 0;
2296 BT_ERR("Unknown link type");
2297 }
2298
2299 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002300 *quote = q ? q : 1;
2301 } else
2302 *quote = 0;
2303
2304 BT_DBG("conn %p quote %d", conn, *quote);
2305 return conn;
2306}
2307
Ville Tervobae1f5d92011-02-10 22:38:53 -03002308static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309{
2310 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002311 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312
Ville Tervobae1f5d92011-02-10 22:38:53 -03002313 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002314
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002315 rcu_read_lock();
2316
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002318 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002319 if (c->type == type && c->sent) {
2320 BT_ERR("%s killing stalled connection %s",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002321 hdev->name, batostr(&c->dst));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002322 hci_acl_disconn(c, 0x13);
2323 }
2324 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002325
2326 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327}
2328
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002329static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002330 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002331{
2332 struct hci_conn_hash *h = &hdev->conn_hash;
2333 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02002334 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002335 struct hci_conn *conn;
2336 int cnt, q, conn_num = 0;
2337
2338 BT_DBG("%s", hdev->name);
2339
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002340 rcu_read_lock();
2341
2342 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002343 struct hci_chan *tmp;
2344
2345 if (conn->type != type)
2346 continue;
2347
2348 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2349 continue;
2350
2351 conn_num++;
2352
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002353 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002354 struct sk_buff *skb;
2355
2356 if (skb_queue_empty(&tmp->data_q))
2357 continue;
2358
2359 skb = skb_peek(&tmp->data_q);
2360 if (skb->priority < cur_prio)
2361 continue;
2362
2363 if (skb->priority > cur_prio) {
2364 num = 0;
2365 min = ~0;
2366 cur_prio = skb->priority;
2367 }
2368
2369 num++;
2370
2371 if (conn->sent < min) {
2372 min = conn->sent;
2373 chan = tmp;
2374 }
2375 }
2376
2377 if (hci_conn_num(hdev, type) == conn_num)
2378 break;
2379 }
2380
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002381 rcu_read_unlock();
2382
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002383 if (!chan)
2384 return NULL;
2385
2386 switch (chan->conn->type) {
2387 case ACL_LINK:
2388 cnt = hdev->acl_cnt;
2389 break;
2390 case SCO_LINK:
2391 case ESCO_LINK:
2392 cnt = hdev->sco_cnt;
2393 break;
2394 case LE_LINK:
2395 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2396 break;
2397 default:
2398 cnt = 0;
2399 BT_ERR("Unknown link type");
2400 }
2401
2402 q = cnt / num;
2403 *quote = q ? q : 1;
2404 BT_DBG("chan %p quote %d", chan, *quote);
2405 return chan;
2406}
2407
/* Promote starved traffic after a TX round.
 *
 * Walks every connection of @type and, for each of its channels, either
 * clears the per-round 'sent' counter (the channel did get service) or
 * bumps the priority of its head-of-queue skb to HCI_PRIO_MAX - 1 so the
 * channel is favoured by the next scheduling pass.  Traversal is done
 * under rcu_read_lock(), matching the _rcu list iterators.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		/* Only connections of the requested link type participate */
		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round: reset its counter
			 * and leave its queued priorities untouched. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Already at (or above) the promotion ceiling */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: promote its head-of-queue skb */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Every connection of this type has been visited; no need
		 * to scan the rest of the hash list. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2457
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002458static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2459{
2460 /* Calculate count of blocks used by this packet */
2461 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2462}
2463
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002464static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466 if (!test_bit(HCI_RAW, &hdev->flags)) {
2467 /* ACL tx timeout must be longer than maximum
2468 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002469 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002470 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002471 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002472 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002473}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474
/* Packet-based ACL scheduler.
 *
 * Repeatedly elects the best channel via hci_chan_sent() and drains up to
 * @quote packets from it, stopping early if the head-of-queue priority
 * drops below the priority the channel was elected with.  Each frame sent
 * consumes one acl_cnt credit.  If anything was transmitted, priorities
 * are recalculated so starved channels get promoted.
 */
static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a controller that stopped returning credits */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority the channel was elected with */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peek confirmed eligibility; now actually take it */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit per packet; track per-channel and
			 * per-connection sent counts for fairness. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something went out: rebalance queue priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2512
/* Block-based ACL scheduler (flow control in buffer blocks, not packets).
 *
 * Same election/priority policy as hci_sched_acl_pkt(), but every frame
 * consumes __get_blocks() credits from hdev->block_cnt, and the quote is
 * likewise decremented in blocks.
 */
static inline void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* NOTE(review): on this early return the skb has
			 * already been dequeued but is neither sent, freed
			 * nor requeued, and hci_prio_recalculate() below is
			 * skipped — looks like a dropped packet; confirm
			 * intent upstream. */
			blocks = __get_blocks(hdev, skb);
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Credits and quota are both accounted in blocks */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2558
2559static inline void hci_sched_acl(struct hci_dev *hdev)
2560{
2561 BT_DBG("%s", hdev->name);
2562
2563 if (!hci_conn_num(hdev, ACL_LINK))
2564 return;
2565
2566 switch (hdev->flow_ctl_mode) {
2567 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2568 hci_sched_acl_pkt(hdev);
2569 break;
2570
2571 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2572 hci_sched_acl_blk(hdev);
2573 break;
2574 }
2575}
2576
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577/* Schedule SCO */
2578static inline void hci_sched_sco(struct hci_dev *hdev)
2579{
2580 struct hci_conn *conn;
2581 struct sk_buff *skb;
2582 int quote;
2583
2584 BT_DBG("%s", hdev->name);
2585
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002586 if (!hci_conn_num(hdev, SCO_LINK))
2587 return;
2588
Linus Torvalds1da177e2005-04-16 15:20:36 -07002589 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2590 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2591 BT_DBG("skb %p len %d", skb, skb->len);
2592 hci_send_frame(skb);
2593
2594 conn->sent++;
2595 if (conn->sent == ~0)
2596 conn->sent = 0;
2597 }
2598 }
2599}
2600
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002601static inline void hci_sched_esco(struct hci_dev *hdev)
2602{
2603 struct hci_conn *conn;
2604 struct sk_buff *skb;
2605 int quote;
2606
2607 BT_DBG("%s", hdev->name);
2608
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002609 if (!hci_conn_num(hdev, ESCO_LINK))
2610 return;
2611
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002612 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2613 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2614 BT_DBG("skb %p len %d", skb, skb->len);
2615 hci_send_frame(skb);
2616
2617 conn->sent++;
2618 if (conn->sent == ~0)
2619 conn->sent = 0;
2620 }
2621 }
2622}
2623
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002624static inline void hci_sched_le(struct hci_dev *hdev)
2625{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002626 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002627 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002628 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002629
2630 BT_DBG("%s", hdev->name);
2631
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002632 if (!hci_conn_num(hdev, LE_LINK))
2633 return;
2634
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002635 if (!test_bit(HCI_RAW, &hdev->flags)) {
2636 /* LE tx timeout must be longer than maximum
2637 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03002638 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002639 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002640 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002641 }
2642
2643 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002644 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002645 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002646 u32 priority = (skb_peek(&chan->data_q))->priority;
2647 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002648 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002649 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002650
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002651 /* Stop if priority has changed */
2652 if (skb->priority < priority)
2653 break;
2654
2655 skb = skb_dequeue(&chan->data_q);
2656
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002657 hci_send_frame(skb);
2658 hdev->le_last_tx = jiffies;
2659
2660 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002661 chan->sent++;
2662 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002663 }
2664 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002665
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002666 if (hdev->le_pkts)
2667 hdev->le_cnt = cnt;
2668 else
2669 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002670
2671 if (cnt != tmp)
2672 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002673}
2674
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002675static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002677 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 struct sk_buff *skb;
2679
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002680 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002681 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682
2683 /* Schedule queues and send stuff to HCI driver */
2684
2685 hci_sched_acl(hdev);
2686
2687 hci_sched_sco(hdev);
2688
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002689 hci_sched_esco(hdev);
2690
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002691 hci_sched_le(hdev);
2692
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693 /* Send next queued raw (unknown type) packet */
2694 while ((skb = skb_dequeue(&hdev->raw_q)))
2695 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696}
2697
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002698/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002699
/* ACL data packet */
/* Deliver one inbound ACL data packet to L2CAP.
 *
 * Strips the ACL header, splits the handle field into connection handle
 * and packet-boundary/broadcast flags, looks up the owning connection
 * and hands the skb to l2cap_recv_acldata() (which takes ownership).
 * Packets for unknown handles are logged and freed here.
 */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* 16-bit field carries both the handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	/* Lookup only; lock is dropped before delivering upward */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* First data on a mgmt-managed link: announce the device
		 * as connected exactly once (test_and_set_bit guards). */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2743
2744/* SCO data packet */
2745static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2746{
2747 struct hci_sco_hdr *hdr = (void *) skb->data;
2748 struct hci_conn *conn;
2749 __u16 handle;
2750
2751 skb_pull(skb, HCI_SCO_HDR_SIZE);
2752
2753 handle = __le16_to_cpu(hdr->handle);
2754
2755 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2756
2757 hdev->stat.sco_rx++;
2758
2759 hci_dev_lock(hdev);
2760 conn = hci_conn_hash_lookup_handle(hdev, handle);
2761 hci_dev_unlock(hdev);
2762
2763 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002765 sco_recv_scodata(conn, skb);
2766 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002768 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002769 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 }
2771
2772 kfree_skb(skb);
2773}
2774
/* RX work handler: drains the device's receive queue and dispatches each
 * packet by type.  Copies go to the monitor socket (and, in promiscuous
 * mode, to raw sockets) before normal processing.  In HCI_RAW mode and
 * for data packets during HCI_INIT, packets are dropped. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space owns the device, drop here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states;
			 * events still fall through to the switch below. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}
}
2829
/* Command work handler: sends the next queued HCI command when the
 * controller has a free command credit (cmd_cnt).
 *
 * A clone of the command is kept in hdev->sent_cmd so completion
 * handling can inspect it.  If cloning fails, the command is requeued
 * at the head and the work is rescheduled.  A response timer is armed
 * for every command except during HCI_RESET, where it is cancelled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (OOM): put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002860
2861int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2862{
2863 /* General inquiry access code (GIAC) */
2864 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2865 struct hci_cp_inquiry cp;
2866
2867 BT_DBG("%s", hdev->name);
2868
2869 if (test_bit(HCI_INQUIRY, &hdev->flags))
2870 return -EINPROGRESS;
2871
Johan Hedberg46632622012-01-02 16:06:08 +02002872 inquiry_cache_flush(hdev);
2873
Andre Guedes2519a1f2011-11-07 11:45:24 -03002874 memset(&cp, 0, sizeof(cp));
2875 memcpy(&cp.lap, lap, sizeof(cp.lap));
2876 cp.length = length;
2877
2878 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2879}
Andre Guedes023d50492011-11-04 14:16:52 -03002880
2881int hci_cancel_inquiry(struct hci_dev *hdev)
2882{
2883 BT_DBG("%s", hdev->name);
2884
2885 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002886 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002887
2888 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2889}
Andre Guedes31f79562012-04-24 21:02:53 -03002890
2891u8 bdaddr_to_le(u8 bdaddr_type)
2892{
2893 switch (bdaddr_type) {
2894 case BDADDR_LE_PUBLIC:
2895 return ADDR_LE_DEV_PUBLIC;
2896
2897 default:
2898 /* Fallback to LE Random address type */
2899 return ADDR_LE_DEV_RANDOM;
2900 }
2901}