blob: 73b459658ccc24c014921a16118a0cb17ceb40dc [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029
Marcel Holtmann611b30f2009-06-08 14:41:38 +020030#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070031
32#include <net/bluetooth/bluetooth.h>
33#include <net/bluetooth/hci_core.h>
34
Johan Hedbergab81cbf2010-12-15 13:53:18 +020035#define AUTO_OFF_TIMEOUT 2000
36
Marcel Holtmannb78752c2010-08-08 23:06:53 -040037static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020038static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020039static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070040
Linus Torvalds1da177e2005-04-16 15:20:36 -070041/* HCI device list */
42LIST_HEAD(hci_dev_list);
43DEFINE_RWLOCK(hci_dev_list_lock);
44
45/* HCI callback list */
46LIST_HEAD(hci_cb_list);
47DEFINE_RWLOCK(hci_cb_list_lock);
48
Linus Torvalds1da177e2005-04-16 15:20:36 -070049/* ---- HCI notifications ---- */
50
/* Forward a device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to the HCI
 * socket layer so listeners are informed about @hdev state changes.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
55
56/* ---- HCI requests ---- */
57
/* Called when a command completes; wakes up any synchronous request
 * waiting in __hci_request() and records @result.
 *
 * During the HCI_INIT phase only the last issued init command is
 * allowed to complete the request; other completions are either
 * ignored or trigger a resend of the pending command (CSR quirk).
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only act on a spurious reset-complete while the last
		 * sent command was something other than reset. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a clone of the last sent command; atomic
		 * context, so allocation failure is silently dropped. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Complete the pending synchronous request, if any. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
95
/* Abort a pending synchronous request with error @err (a positive
 * errno value; __hci_request negates it before returning).
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
106
107/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Runs @req (which is expected to send one or more HCI commands) and
 * sleeps up to @timeout jiffies until hci_req_complete()/hci_req_cancel()
 * updates hdev->req_status. Caller must hold the request lock
 * (see hci_request()).
 *
 * Returns 0 on success, a negative errno on failure, -EINTR if a
 * signal interrupted the wait, or -ETIMEDOUT if nothing completed.
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue *before* issuing the request so a
	 * fast completion cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* req_result holds an HCI status; map to errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result already holds a positive errno. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
150
/* Serialized wrapper around __hci_request(): takes the per-device
 * request lock so only one synchronous request runs at a time.
 * Returns -ENETDOWN if the device is not up.
 */
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
167
/* Request callback: issue an HCI Reset and mark the reset as in
 * progress via the HCI_RESET flag. @opt is unused (logged only).
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
176
/* Queue the BR/EDR controller init command sequence: reset (unless the
 * RESET_ON_CLOSE quirk already resets on close), read basic controller
 * info, then optional setup (event filter, CA timeout, stored link key
 * deletion). Commands are fire-and-forget; completion is handled by
 * the event path during HCI_INIT.
 */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625 ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (delete_all = 1, BDADDR_ANY) */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
228
/* Queue the AMP controller init command sequence: reset, then read
 * local version and AMP info. AMP controllers use block-based flow
 * control rather than packet-based.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
242
/* Request callback for device bring-up: first flush any driver-supplied
 * "special" init commands into the command queue, then run the init
 * sequence matching the controller type (BR/EDR or AMP).
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands queued by the driver before open() */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
276
/* Request callback for LE-specific init: query the LE buffer size.
 * @opt is unused.
 */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
284
/* Request callback: write the scan enable setting. @opt carries the
 * scan bitmask (inquiry/page scan) in its low byte.
 */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
294
/* Request callback: write the authentication enable setting taken from
 * the low byte of @opt.
 */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
304
/* Request callback: write the encryption mode setting taken from the
 * low byte of @opt.
 */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
314
/* Request callback: write the default link policy. @opt is converted
 * to little-endian as required on the wire.
 */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
324
/* Get HCI device by index.
 * Device is held on return (caller must hci_dev_put() it).
 * Returns NULL for a negative index or if no device matches. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock. */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700346
347/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200348
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200349bool hci_discovery_active(struct hci_dev *hdev)
350{
351 struct discovery_state *discov = &hdev->discovery;
352
Andre Guedes6fbe1952012-02-03 17:47:58 -0300353 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300354 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300355 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200356 return true;
357
Andre Guedes6fbe1952012-02-03 17:47:58 -0300358 default:
359 return false;
360 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200361}
362
/* Transition the discovery state machine and emit the matching mgmt
 * "discovering" events. A no-op if the state is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Don't report "stopped" if discovery never actually
		 * started (we were only in STARTING). */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
388
/* Free every entry in the inquiry cache and reset the unknown/resolve
 * sub-lists. The "all" list heads are re-linked implicitly by the
 * deletions; unknown/resolve are re-initialized because their entries
 * were just freed. Caller is expected to hold the device lock.
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
402
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300403struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
404 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405{
Johan Hedberg30883512012-01-04 14:16:21 +0200406 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700407 struct inquiry_entry *e;
408
409 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
410
Johan Hedberg561aafb2012-01-04 13:31:59 +0200411 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700412 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200413 return e;
414 }
415
416 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700417}
418
Johan Hedberg561aafb2012-01-04 13:31:59 +0200419struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300420 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200421{
Johan Hedberg30883512012-01-04 14:16:21 +0200422 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200423 struct inquiry_entry *e;
424
425 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
426
427 list_for_each_entry(e, &cache->unknown, list) {
428 if (!bacmp(&e->data.bdaddr, bdaddr))
429 return e;
430 }
431
432 return NULL;
433}
434
/* Find an entry on the name-resolve list. With @bdaddr == BDADDR_ANY
 * the first entry in name state @state matches (used to pick the next
 * device to resolve); otherwise the entry for that exact address is
 * returned. NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
453
/* Re-position @ie on the resolve list so entries are kept ordered by
 * signal strength (strongest |rssi| closest to 0 first), while leaving
 * entries already in NAME_PENDING state at the front of the list.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink first; re-inserted at the computed position below. */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
472
/* Insert or refresh the inquiry-cache entry for @data.
 *
 * @name_known: caller already knows the remote name for this result.
 * @ssp:        out parameter; set to true when either this result or a
 *              cached entry indicates SSP support (may be NULL).
 *
 * Returns true when the entry's name is known (or pending), i.e. no
 * name resolution is needed; false when the name is still unknown or
 * allocation of a new entry failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support is sticky once observed for this device. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* An RSSI change re-sorts the resolve list so stronger
		 * devices get their names resolved first. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Promote an existing unknown/needed entry once the name is
	 * known (but leave an in-flight NAME_PENDING request alone). */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
528
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info (for the HCIINQUIRY ioctl). Returns the number
 * of entries written. Does not sleep; caller holds the device lock.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
556
/* Request callback: start an inquiry using the parameters passed via
 * @opt (a pointer to struct hci_inquiry_req). Skipped if an inquiry
 * is already in progress.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
573
/* HCIINQUIRY ioctl handler: optionally run a fresh inquiry (when the
 * cache is stale/empty or a flush was requested), then copy cached
 * results back to user space after the updated request header.
 *
 * @arg: user pointer to a struct hci_inquiry_req followed by space for
 *       the inquiry_info results.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Decide under the lock whether a new inquiry is needed. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units per spec; 2000 ms approximates it. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated header, then the results after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
640
641/* ---- HCI ioctl helpers ---- */
642
/* HCIDEVUP handler: open and initialize device @dev.
 *
 * Holds the request lock for the whole sequence. Fails with -ENODEV
 * (unknown/unregistering device), -ERFKILL (rfkill blocked),
 * -EALREADY (already up) or -EIO (driver open failed). On init
 * failure all work is flushed and the driver is closed again.
 * Returns 0 on success.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip the HCI init command sequence entirely. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* During HCI_SETUP mgmt_powered is deferred to the
		 * setup path. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
734
/* Bring device @hdev down: cancel pending work and requests, flush
 * queues and caches, optionally send a final reset (RESET_ON_CLOSE
 * quirk), close the driver and notify mgmt. Safe to call on an
 * already-down device (returns 0 early). Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	/* Abort any synchronous request in flight before taking the
	 * request lock ourselves. */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Auto-off already reported powered-off via mgmt; otherwise
	 * report it now. */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
822
823int hci_dev_close(__u16 dev)
824{
825 struct hci_dev *hdev;
826 int err;
827
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200828 hdev = hci_dev_get(dev);
829 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700830 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100831
832 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
833 cancel_delayed_work(&hdev->power_off);
834
Linus Torvalds1da177e2005-04-16 15:20:36 -0700835 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100836
Linus Torvalds1da177e2005-04-16 15:20:36 -0700837 hci_dev_put(hdev);
838 return err;
839}
840
/* Reset a running HCI device: drop queued traffic, flush the inquiry
 * cache and connection hash, and (unless the device is in raw mode)
 * issue an HCI Reset command.
 *
 * Returns 0 on success (or if the device is simply down), -ENODEV if
 * the device does not exist, or the error from __hci_request().
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore command credit and clear per-link-type packet counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
879
880int hci_dev_reset_stat(__u16 dev)
881{
882 struct hci_dev *hdev;
883 int ret = 0;
884
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200885 hdev = hci_dev_get(dev);
886 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700887 return -ENODEV;
888
889 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
890
891 hci_dev_put(hdev);
892
893 return ret;
894}
895
/* Handle the per-device configuration ioctls (HCISET*).
 *
 * @cmd: ioctl number
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * For the MTU ioctls dev_opt packs two 16-bit values: the second
 * halfword is the MTU, the first the packet count (see the +1/+0
 * halfword reads below).
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
970
/* HCIGETDEVLIST: copy the id/flags of up to dev_num registered devices
 * to userspace.
 *
 * Note the side effects while walking the list: a pending auto
 * power-off is cancelled for each device, and devices not managed via
 * mgmt get HCI_PAIRABLE set.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below to at most two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1017
/* HCIGETDEVINFO: fill a struct hci_dev_info for one device and copy it
 * to userspace.
 *
 * Like hci_get_dev_list(), this cancels a pending auto power-off and
 * marks non-mgmt devices pairable as a side effect.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Bus type in the low nibble, device type in the next one */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1059
1060/* ---- Interface to HCI drivers ---- */
1061
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001062static int hci_rfkill_set_block(void *data, bool blocked)
1063{
1064 struct hci_dev *hdev = data;
1065
1066 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1067
1068 if (!blocked)
1069 return 0;
1070
1071 hci_dev_do_close(hdev);
1072
1073 return 0;
1074}
1075
/* rfkill callbacks for HCI devices; only set_block is provided */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1079
/* Deferred power-on work: open the device and, when it was powered up
 * automatically, arm the auto power-off timer.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-powered devices are shut down again after AUTO_OFF_TIMEOUT
	 * unless something clears HCI_AUTO_OFF in the meantime */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	/* First successful power-on finishes setup: announce the index */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1096
1097static void hci_power_off(struct work_struct *work)
1098{
Johan Hedberg32435532011-11-07 22:16:04 +02001099 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001100 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001101
1102 BT_DBG("%s", hdev->name);
1103
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001104 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001105}
1106
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001107static void hci_discov_off(struct work_struct *work)
1108{
1109 struct hci_dev *hdev;
1110 u8 scan = SCAN_PAGE;
1111
1112 hdev = container_of(work, struct hci_dev, discov_off.work);
1113
1114 BT_DBG("%s", hdev->name);
1115
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001116 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001117
1118 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1119
1120 hdev->discov_timeout = 0;
1121
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001122 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001123}
1124
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001125int hci_uuids_clear(struct hci_dev *hdev)
1126{
1127 struct list_head *p, *n;
1128
1129 list_for_each_safe(p, n, &hdev->uuids) {
1130 struct bt_uuid *uuid;
1131
1132 uuid = list_entry(p, struct bt_uuid, list);
1133
1134 list_del(p);
1135 kfree(uuid);
1136 }
1137
1138 return 0;
1139}
1140
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001141int hci_link_keys_clear(struct hci_dev *hdev)
1142{
1143 struct list_head *p, *n;
1144
1145 list_for_each_safe(p, n, &hdev->link_keys) {
1146 struct link_key *key;
1147
1148 key = list_entry(p, struct link_key, list);
1149
1150 list_del(p);
1151 kfree(key);
1152 }
1153
1154 return 0;
1155}
1156
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001157int hci_smp_ltks_clear(struct hci_dev *hdev)
1158{
1159 struct smp_ltk *k, *tmp;
1160
1161 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1162 list_del(&k->list);
1163 kfree(k);
1164 }
1165
1166 return 0;
1167}
1168
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001169struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1170{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001171 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001172
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001173 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001174 if (bacmp(bdaddr, &k->bdaddr) == 0)
1175 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001176
1177 return NULL;
1178}
1179
/* Decide whether a newly created link key should be stored
 * persistently, based on the key type and the bonding requirements of
 * both sides of the connection (if any).
 *
 * Returns true when the key may be kept across power cycles.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1215
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001216struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001217{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001218 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001219
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001220 list_for_each_entry(k, &hdev->long_term_keys, list) {
1221 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001222 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001223 continue;
1224
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001225 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001226 }
1227
1228 return NULL;
1229}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001230
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001231struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001232 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001233{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001234 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001235
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001236 list_for_each_entry(k, &hdev->long_term_keys, list)
1237 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001238 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001239 return k;
1240
1241 return NULL;
1242}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001243
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn:    the connection the key was created on, or NULL (security
 *           mode 3 style pairing)
 * @new_key: non-zero when the controller reported this as a new key;
 *           only then is mgmt notified and the persistence decision
 *           propagated to the connection.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps its previous type on record */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1296
/* Store (or update) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK type keys are accepted; anything else is silently
 * ignored (returns 0). mgmt is notified only for new LTKs.
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1333
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001334int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1335{
1336 struct link_key *key;
1337
1338 key = hci_find_link_key(hdev, bdaddr);
1339 if (!key)
1340 return -ENOENT;
1341
1342 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1343
1344 list_del(&key->list);
1345 kfree(key);
1346
1347 return 0;
1348}
1349
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001350int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1351{
1352 struct smp_ltk *k, *tmp;
1353
1354 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1355 if (bacmp(bdaddr, &k->bdaddr))
1356 continue;
1357
1358 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1359
1360 list_del(&k->list);
1361 kfree(k);
1362 }
1363
1364 return 0;
1365}
1366
Ville Tervo6bd32322011-02-16 16:32:41 +02001367/* HCI command timer function */
1368static void hci_cmd_timer(unsigned long arg)
1369{
1370 struct hci_dev *hdev = (void *) arg;
1371
1372 BT_ERR("%s command tx timeout", hdev->name);
1373 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001374 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001375}
1376
Szymon Janc2763eda2011-03-22 13:12:22 +01001377struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001378 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001379{
1380 struct oob_data *data;
1381
1382 list_for_each_entry(data, &hdev->remote_oob_data, list)
1383 if (bacmp(bdaddr, &data->bdaddr) == 0)
1384 return data;
1385
1386 return NULL;
1387}
1388
1389int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1390{
1391 struct oob_data *data;
1392
1393 data = hci_find_remote_oob_data(hdev, bdaddr);
1394 if (!data)
1395 return -ENOENT;
1396
1397 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1398
1399 list_del(&data->list);
1400 kfree(data);
1401
1402 return 0;
1403}
1404
1405int hci_remote_oob_data_clear(struct hci_dev *hdev)
1406{
1407 struct oob_data *data, *n;
1408
1409 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1410 list_del(&data->list);
1411 kfree(data);
1412 }
1413
1414 return 0;
1415}
1416
1417int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001418 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001419{
1420 struct oob_data *data;
1421
1422 data = hci_find_remote_oob_data(hdev, bdaddr);
1423
1424 if (!data) {
1425 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1426 if (!data)
1427 return -ENOMEM;
1428
1429 bacpy(&data->bdaddr, bdaddr);
1430 list_add(&data->list, &hdev->remote_oob_data);
1431 }
1432
1433 memcpy(data->hash, hash, sizeof(data->hash));
1434 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1435
1436 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1437
1438 return 0;
1439}
1440
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001441struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001442{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001443 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001444
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001445 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001446 if (bacmp(bdaddr, &b->bdaddr) == 0)
1447 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001448
1449 return NULL;
1450}
1451
1452int hci_blacklist_clear(struct hci_dev *hdev)
1453{
1454 struct list_head *p, *n;
1455
1456 list_for_each_safe(p, n, &hdev->blacklist) {
1457 struct bdaddr_list *b;
1458
1459 b = list_entry(p, struct bdaddr_list, list);
1460
1461 list_del(p);
1462 kfree(b);
1463 }
1464
1465 return 0;
1466}
1467
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001468int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001469{
1470 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001471
1472 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1473 return -EBADF;
1474
Antti Julku5e762442011-08-25 16:48:02 +03001475 if (hci_blacklist_lookup(hdev, bdaddr))
1476 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001477
1478 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001479 if (!entry)
1480 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001481
1482 bacpy(&entry->bdaddr, bdaddr);
1483
1484 list_add(&entry->list, &hdev->blacklist);
1485
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001486 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001487}
1488
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001489int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001490{
1491 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001492
Szymon Janc1ec918c2011-11-16 09:32:21 +01001493 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001494 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001495
1496 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001497 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001498 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001499
1500 list_del(&entry->list);
1501 kfree(entry);
1502
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001503 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001504}
1505
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001506static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1507{
1508 struct le_scan_params *param = (struct le_scan_params *) opt;
1509 struct hci_cp_le_set_scan_param cp;
1510
1511 memset(&cp, 0, sizeof(cp));
1512 cp.type = param->type;
1513 cp.interval = cpu_to_le16(param->interval);
1514 cp.window = cpu_to_le16(param->window);
1515
1516 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1517}
1518
1519static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1520{
1521 struct hci_cp_le_set_scan_enable cp;
1522
1523 memset(&cp, 0, sizeof(cp));
1524 cp.enable = 1;
1525
1526 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1527}
1528
/* Start an LE scan synchronously: program the scan parameters, enable
 * scanning, and schedule automatic disable after @timeout ms.
 *
 * Runs under the request lock; must not be called from atomic context.
 *
 * Returns 0 on success, -EINPROGRESS if a scan is already running, or
 * the error from __hci_request().
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* Per-command completion timeout for the two requests below */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* param lives on the stack; both requests complete before return */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1562
Andre Guedes7dbfac12012-03-15 16:52:07 -03001563int hci_cancel_le_scan(struct hci_dev *hdev)
1564{
1565 BT_DBG("%s", hdev->name);
1566
1567 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1568 return -EALREADY;
1569
1570 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1571 struct hci_cp_le_set_scan_enable cp;
1572
1573 /* Send HCI command to disable LE Scan */
1574 memset(&cp, 0, sizeof(cp));
1575 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1576 }
1577
1578 return 0;
1579}
1580
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001581static void le_scan_disable_work(struct work_struct *work)
1582{
1583 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001584 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001585 struct hci_cp_le_set_scan_enable cp;
1586
1587 BT_DBG("%s", hdev->name);
1588
1589 memset(&cp, 0, sizeof(cp));
1590
1591 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1592}
1593
Andre Guedes28b75a82012-02-03 17:48:00 -03001594static void le_scan_work(struct work_struct *work)
1595{
1596 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1597 struct le_scan_params *param = &hdev->le_scan_params;
1598
1599 BT_DBG("%s", hdev->name);
1600
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001601 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1602 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001603}
1604
/* Request an asynchronous LE scan.
 *
 * The scan itself runs from le_scan_work on system_long_wq; only one
 * request may be outstanding at a time.
 *
 * Returns -EINPROGRESS if a scan request is already queued or running,
 * 0 otherwise.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	/* Stash the parameters for le_scan_work to pick up */
	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1624
/* Alloc HCI device */
/* Allocate and initialise a new hci_dev structure. Returns NULL on
 * allocation failure. The caller owns the reference and releases it
 * with hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Conservative defaults until the controller reports its real
	 * capabilities during init */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	/* RX/TX/command processing all run from work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1678
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver init packets that were never sent */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1688
/* Register HCI device */
/* Returns the assigned device id (>= 0) on success or a negative
 * errno. Takes a reference on hdev that is dropped again in
 * hci_unregister_dev(). */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head, *p;
	int id, error;

	/* A transport driver must provide at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	write_lock(&hci_dev_list_lock);

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
	head = &hci_dev_list;

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		int nid = list_entry(p, struct hci_dev, list)->id;
		if (nid > id)
			break;
		if (nid == id)
			id++;
		head = p;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Insert after 'head' so the list stays sorted by id */
	list_add(&hdev->list, head);

	write_unlock(&hci_dev_list_lock);

	/* Per-device single-threaded workqueue for RX/TX/cmd work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill failure is not fatal; the device just runs without it */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1765
/* Unregister HCI device */
/* Tears down a device registered with hci_register_dev(): removes it
 * from the global list, closes it, releases sysfs/rfkill/workqueue
 * resources, clears stored remote-device state and drops the
 * registration reference. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark first so concurrent paths can see the device is going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Tell mgmt the index is gone, unless the device never left its
	 * setup phase */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Purge all stored remote device state */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken in hci_register_dev() */
	hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1817
/* Suspend HCI device */
/* Only notifies registered listeners; no device state is changed. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1825
/* Resume HCI device */
/* Only notifies registered listeners; no device state is changed. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1833
Marcel Holtmann76bca882009-11-18 00:40:39 +01001834/* Receive frame from HCI drivers */
1835int hci_recv_frame(struct sk_buff *skb)
1836{
1837 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1838 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001839 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001840 kfree_skb(skb);
1841 return -ENXIO;
1842 }
1843
1844 /* Incomming skb */
1845 bt_cb(skb)->incoming = 1;
1846
1847 /* Time stamp */
1848 __net_timestamp(skb);
1849
Marcel Holtmann76bca882009-11-18 00:40:39 +01001850 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001851 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001852
Marcel Holtmann76bca882009-11-18 00:40:39 +01001853 return 0;
1854}
1855EXPORT_SYMBOL(hci_recv_frame);
1856
/* Append @count bytes from @data to the partial packet being rebuilt
 * in hdev->reassembly[@index], allocating a fresh skb sized for the
 * packet type when no packet is in progress. When the expected byte
 * count drops to zero the completed frame is handed to
 * hci_recv_frame().
 *
 * Returns the number of input bytes NOT consumed (they belong to the
 * next frame), or a negative error: -EILSEQ for a bad type/index,
 * -ENOMEM on allocation failure or header-declared overflow.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Starting a new packet: pick the maximum frame size and
		 * header length for this packet type */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many more bytes are needed;
		 * initially just the packet header */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length it
		 * declares and make sure it fits in the allocated skb */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1965
Marcel Holtmannef222012007-07-11 06:42:04 +02001966int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1967{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301968 int rem = 0;
1969
Marcel Holtmannef222012007-07-11 06:42:04 +02001970 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1971 return -EILSEQ;
1972
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001973 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001974 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301975 if (rem < 0)
1976 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001977
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301978 data += (count - rem);
1979 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001980 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001981
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301982 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001983}
1984EXPORT_SYMBOL(hci_recv_fragment);
1985
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream from a driver into the reassembly machinery.
 * The first byte of each frame is the HCI packet type indicator; all
 * stream data shares the single STREAM_REASSEMBLY slot.
 *
 * Returns the number of bytes still expected for the current frame,
 * or a negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Skip the consumed bytes and continue with the rest */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2020
Linus Torvalds1da177e2005-04-16 15:20:36 -07002021/* ---- Interface to upper protocols ---- */
2022
/* Register an upper-protocol callback set. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2034
/* Remove a callback set registered with hci_register_cb().
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2046
/* Hand one frame to the transport driver. Consumes @skb. A copy is
 * delivered to the monitor socket and, when raw sockets are listening
 * (hdev->promisc), to the HCI sockets as well. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2074
/* Send HCI command */
/* Builds a command packet (header + optional parameter block copied
 * from @param) and queues it on cmd_q; actual transmission happens
 * from the command work. Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command sent during init so the completion
	 * handler can drive the init sequence */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110
2111/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002112void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002113{
2114 struct hci_command_hdr *hdr;
2115
2116 if (!hdev->sent_cmd)
2117 return NULL;
2118
2119 hdr = (void *) hdev->sent_cmd->data;
2120
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002121 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122 return NULL;
2123
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002124 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
2126 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2127}
2128
/* Send ACL data */
/* Prepend an ACL data header (packed handle+flags, payload length)
 * in front of the current skb data. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2141
/* Add ACL headers to @skb (and to each entry of its frag_list, if it
 * is fragmented) and append everything to @queue. Fragments after the
 * first are marked ACL_CONT instead of ACL_START. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Only the linear part belongs to the first fragment */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragment chain; each fragment is queued as
		 * an individual skb below */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2188
/* Queue ACL data on the channel's data queue and kick the TX work.
 * Consumes @skb. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202
/* Send SCO data */
/* Prepend a SCO header, queue the packet on the connection's data
 * queue and kick the TX work. Consumes @skb. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224
2225/* ---- HCI TX task (outgoing data) ---- */
2226
/* HCI Connection scheduler */
/* Pick the connection of link type @type that has queued data and the
 * fewest packets in flight, and compute its fair share (*quote) of the
 * controller's free buffers. *quote is 0 when nothing is schedulable. */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer budget depends on the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Share the free buffers equally, at least one each */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2287
/* TX timeout handler: disconnect every connection of the given link
 * type that still has unacknowledged packets outstanding. */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			/* 0x13: remote user terminated connection */
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2308
/* Channel scheduler: among all channels of link type @type with queued
 * data, pick one whose head packet has the highest priority, breaking
 * ties in favour of the connection with the fewest packets in flight.
 * *quote receives the channel's share of the free controller buffers.
 * Returns NULL when nothing is schedulable. */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A strictly higher priority restarts the
			 * selection from scratch */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer budget depends on the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Share the free buffers equally, at least one each */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2387
/* Priority aging: for every channel of link type @type that sent
 * nothing in the last round, promote its head packet to
 * HCI_PRIO_MAX - 1 so low-priority channels cannot be starved.
 * Channels that did send get their sent counter reset instead. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced: clear counter, no boost */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2437
/* Number of controller data blocks this ACL packet occupies.
 * NOTE(review): subtracts HCI_ACL_HDR_SIZE from skb->len, i.e. it
 * assumes the ACL header has already been pushed onto the skb. */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2443
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002444static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002446 if (!test_bit(HCI_RAW, &hdev->flags)) {
2447 /* ACL tx timeout must be longer than maximum
2448 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002449 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002450 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002451 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002453}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454
/* Send queued ACL frames under packet-based flow control.
 *
 * Repeatedly picks a channel via hci_chan_sent() and sends up to its
 * quota of frames while the controller still has free ACL buffers
 * (hdev->acl_cnt).  If anything was sent this round,
 * hci_prio_recalculate() promotes channels that were starved.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head frame when the channel was chosen;
		 * only frames of at least this priority are drained now.
		 */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed per frame */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2492
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002493static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002494{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002495 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002496 struct hci_chan *chan;
2497 struct sk_buff *skb;
2498 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002499
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002500 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002501
2502 while (hdev->block_cnt > 0 &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002503 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002504 u32 priority = (skb_peek(&chan->data_q))->priority;
2505 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2506 int blocks;
2507
2508 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002509 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002510
2511 /* Stop if priority has changed */
2512 if (skb->priority < priority)
2513 break;
2514
2515 skb = skb_dequeue(&chan->data_q);
2516
2517 blocks = __get_blocks(hdev, skb);
2518 if (blocks > hdev->block_cnt)
2519 return;
2520
2521 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002522 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002523
2524 hci_send_frame(skb);
2525 hdev->acl_last_tx = jiffies;
2526
2527 hdev->block_cnt -= blocks;
2528 quote -= blocks;
2529
2530 chan->sent += blocks;
2531 chan->conn->sent += blocks;
2532 }
2533 }
2534
2535 if (cnt != hdev->block_cnt)
2536 hci_prio_recalculate(hdev, ACL_LINK);
2537}
2538
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002539static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002540{
2541 BT_DBG("%s", hdev->name);
2542
2543 if (!hci_conn_num(hdev, ACL_LINK))
2544 return;
2545
2546 switch (hdev->flow_ctl_mode) {
2547 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2548 hci_sched_acl_pkt(hdev);
2549 break;
2550
2551 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2552 hci_sched_acl_blk(hdev);
2553 break;
2554 }
2555}
2556
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002558static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002559{
2560 struct hci_conn *conn;
2561 struct sk_buff *skb;
2562 int quote;
2563
2564 BT_DBG("%s", hdev->name);
2565
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002566 if (!hci_conn_num(hdev, SCO_LINK))
2567 return;
2568
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2570 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2571 BT_DBG("skb %p len %d", skb, skb->len);
2572 hci_send_frame(skb);
2573
2574 conn->sent++;
2575 if (conn->sent == ~0)
2576 conn->sent = 0;
2577 }
2578 }
2579}
2580
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002581static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002582{
2583 struct hci_conn *conn;
2584 struct sk_buff *skb;
2585 int quote;
2586
2587 BT_DBG("%s", hdev->name);
2588
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002589 if (!hci_conn_num(hdev, ESCO_LINK))
2590 return;
2591
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002592 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2593 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002594 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2595 BT_DBG("skb %p len %d", skb, skb->len);
2596 hci_send_frame(skb);
2597
2598 conn->sent++;
2599 if (conn->sent == ~0)
2600 conn->sent = 0;
2601 }
2602 }
2603}
2604
/* Send queued LE frames.
 *
 * Controllers without dedicated LE buffers (hdev->le_pkts == 0) share
 * the ACL buffer pool, so the budget falls back to hdev->acl_cnt and is
 * written back to whichever counter it came from.  Starved channels are
 * promoted afterwards via hci_prio_recalculate().
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use dedicated LE buffers if present, else share ACL buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head frame when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining budget to the counter it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2655
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002656static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002658 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659 struct sk_buff *skb;
2660
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002661 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002662 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663
2664 /* Schedule queues and send stuff to HCI driver */
2665
2666 hci_sched_acl(hdev);
2667
2668 hci_sched_sco(hdev);
2669
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002670 hci_sched_esco(hdev);
2671
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002672 hci_sched_le(hdev);
2673
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674 /* Send next queued raw (unknown type) packet */
2675 while ((skb = skb_dequeue(&hdev->raw_q)))
2676 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677}
2678
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002679/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002680
2681/* ACL data packet */
/* Handle an incoming ACL data packet: resolve the connection handle and
 * pass the payload up to L2CAP, or drop it with an error if the handle
 * is unknown.  Ownership of the skb transfers to l2cap_recv_acldata()
 * on success; otherwise it is freed here.
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field also carries the packet flags; extract
	 * both pieces (hci_flags/hci_handle).
	 */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Notify mgmt of the connection only once, on the first
		 * data packet (HCI_CONN_MGMT_CONNECTED test-and-set).
		 */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2724
2725/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002726static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727{
2728 struct hci_sco_hdr *hdr = (void *) skb->data;
2729 struct hci_conn *conn;
2730 __u16 handle;
2731
2732 skb_pull(skb, HCI_SCO_HDR_SIZE);
2733
2734 handle = __le16_to_cpu(hdr->handle);
2735
2736 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2737
2738 hdev->stat.sco_rx++;
2739
2740 hci_dev_lock(hdev);
2741 conn = hci_conn_hash_lookup_handle(hdev, handle);
2742 hci_dev_unlock(hdev);
2743
2744 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002746 sco_recv_scodata(conn, skb);
2747 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002749 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002750 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751 }
2752
2753 kfree_skb(skb);
2754}
2755
/* RX work: drain hdev->rx_q and dispatch each frame.
 *
 * Every frame is first copied to the monitor channel and, while in
 * promiscuous mode, to raw HCI sockets.  Frames on a raw-mode device
 * are then dropped; data packets arriving during HCI_INIT are dropped
 * as well.  Everything else is routed by packet type to the event, ACL
 * or SCO handler, which take ownership of the skb.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw-mode devices never get stack processing */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}
}
2810
/* Command work: send the next queued HCI command, if the controller has
 * credit for it.
 *
 * cmd_cnt is the controller's outstanding-command credit; a command is
 * only sent while it is non-zero (decremented here).  A clone of the
 * sent command is kept in hdev->sent_cmd so response events can be
 * matched against it.  The cmd_timer watchdog is armed for each command
 * except during HCI_RESET, where it is stopped instead.  If cloning
 * fails, the command is requeued and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002841
2842int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2843{
2844 /* General inquiry access code (GIAC) */
2845 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2846 struct hci_cp_inquiry cp;
2847
2848 BT_DBG("%s", hdev->name);
2849
2850 if (test_bit(HCI_INQUIRY, &hdev->flags))
2851 return -EINPROGRESS;
2852
Johan Hedberg46632622012-01-02 16:06:08 +02002853 inquiry_cache_flush(hdev);
2854
Andre Guedes2519a1f2011-11-07 11:45:24 -03002855 memset(&cp, 0, sizeof(cp));
2856 memcpy(&cp.lap, lap, sizeof(cp.lap));
2857 cp.length = length;
2858
2859 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2860}
Andre Guedes023d50492011-11-04 14:16:52 -03002861
2862int hci_cancel_inquiry(struct hci_dev *hdev)
2863{
2864 BT_DBG("%s", hdev->name);
2865
2866 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002867 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002868
2869 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2870}
Andre Guedes31f79562012-04-24 21:02:53 -03002871
2872u8 bdaddr_to_le(u8 bdaddr_type)
2873{
2874 switch (bdaddr_type) {
2875 case BDADDR_LE_PUBLIC:
2876 return ADDR_LE_DEV_PUBLIC;
2877
2878 default:
2879 /* Fallback to LE Random address type */
2880 return ADDR_LE_DEV_RANDOM;
2881 }
2882}