blob: 88858963ec21b4cf4cce14bb2288dbd0129f8a47 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Johan Hedbergab81cbf2010-12-15 13:53:18 +020036#define AUTO_OFF_TIMEOUT 2000
37
Marcel Holtmannb78752c2010-08-08 23:06:53 -040038static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020039static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020040static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
Linus Torvalds1da177e2005-04-16 15:20:36 -070042/* HCI device list */
43LIST_HEAD(hci_dev_list);
44DEFINE_RWLOCK(hci_dev_list_lock);
45
46/* HCI callback list */
47LIST_HEAD(hci_cb_list);
48DEFINE_RWLOCK(hci_cb_list_lock);
49
Sasha Levin3df92b32012-05-27 22:36:56 +020050/* HCI ID Numbering */
51static DEFINE_IDA(hci_index_ida);
52
Linus Torvalds1da177e2005-04-16 15:20:36 -070053/* ---- HCI notifications ---- */
54
/* Forward a device state event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
59
60/* ---- HCI requests ---- */
61
Johan Hedberg23bb5762010-12-21 23:01:27 +020062void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070063{
Johan Hedberg23bb5762010-12-21 23:01:27 +020064 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
65
Johan Hedberga5040ef2011-01-10 13:28:59 +020066 /* If this is the init phase check if the completed command matches
67 * the last init command, and if not just return.
68 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020069 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
70 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
Andrei Emeltchenko1036b892012-03-12 15:59:33 +020071 u16 opcode = __le16_to_cpu(sent->opcode);
Johan Hedberg75fb0e32012-03-01 21:35:55 +020072 struct sk_buff *skb;
73
74 /* Some CSR based controllers generate a spontaneous
75 * reset complete event during init and any pending
76 * command will never be completed. In such a case we
77 * need to resend whatever was the last sent
78 * command.
79 */
80
Andrei Emeltchenko1036b892012-03-12 15:59:33 +020081 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
Johan Hedberg75fb0e32012-03-01 21:35:55 +020082 return;
83
84 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
85 if (skb) {
86 skb_queue_head(&hdev->cmd_q, skb);
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88 }
89
Johan Hedberg23bb5762010-12-21 23:01:27 +020090 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +020091 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070092
93 if (hdev->req_status == HCI_REQ_PEND) {
94 hdev->req_result = result;
95 hdev->req_status = HCI_REQ_DONE;
96 wake_up_interruptible(&hdev->req_wait_q);
97 }
98}
99
100static void hci_req_cancel(struct hci_dev *hdev, int err)
101{
102 BT_DBG("%s err 0x%2.2x", hdev->name, err);
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = err;
106 hdev->req_status = HCI_REQ_CANCELED;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111/* Execute request and wait for completion. */
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300112static int __hci_request(struct hci_dev *hdev,
113 void (*req)(struct hci_dev *hdev, unsigned long opt),
114 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115{
116 DECLARE_WAITQUEUE(wait, current);
117 int err = 0;
118
119 BT_DBG("%s start", hdev->name);
120
121 hdev->req_status = HCI_REQ_PEND;
122
123 add_wait_queue(&hdev->req_wait_q, &wait);
124 set_current_state(TASK_INTERRUPTIBLE);
125
126 req(hdev, opt);
127 schedule_timeout(timeout);
128
129 remove_wait_queue(&hdev->req_wait_q, &wait);
130
131 if (signal_pending(current))
132 return -EINTR;
133
134 switch (hdev->req_status) {
135 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700136 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137 break;
138
139 case HCI_REQ_CANCELED:
140 err = -hdev->req_result;
141 break;
142
143 default:
144 err = -ETIMEDOUT;
145 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700146 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
Johan Hedberga5040ef2011-01-10 13:28:59 +0200148 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149
150 BT_DBG("%s end: err %d", hdev->name, err);
151
152 return err;
153}
154
Gustavo Padovan6039aa732012-05-23 04:04:18 -0300155static int hci_request(struct hci_dev *hdev,
156 void (*req)(struct hci_dev *hdev, unsigned long opt),
157 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158{
159 int ret;
160
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200161 if (!test_bit(HCI_UP, &hdev->flags))
162 return -ENETDOWN;
163
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164 /* Serialize all requests */
165 hci_req_lock(hdev);
166 ret = __hci_request(hdev, req, opt, timeout);
167 hci_req_unlock(hdev);
168
169 return ret;
170}
171
172static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
173{
174 BT_DBG("%s %ld", hdev->name, opt);
175
176 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300177 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200178 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179}
180
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200181static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200183 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800184 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200185 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200187 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
188
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 /* Mandatory initialization */
190
191 /* Reset */
Szymon Janca6c511c2012-05-23 12:35:46 +0200192 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200193 set_bit(HCI_RESET, &hdev->flags);
194 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300195 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196
197 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200200 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200201 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200202
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200204 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200207 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
208
209 /* Read Class of Device */
210 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
211
212 /* Read Local Name */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214
215 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200216 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217
218 /* Optional initialization */
219
220 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200221 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200222 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224 /* Connection accept timeout ~20 secs */
Andrei Emeltchenko82781e62012-05-25 11:38:27 +0300225 param = __constant_cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200226 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200227
228 bacpy(&cp.bdaddr, BDADDR_ANY);
229 cp.delete_all = 1;
230 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700231}
232
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200233static void amp_init(struct hci_dev *hdev)
234{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200235 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
236
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200237 /* Reset */
238 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
239
240 /* Read Local Version */
241 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300242
243 /* Read Local AMP Info */
244 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200245}
246
247static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
248{
249 struct sk_buff *skb;
250
251 BT_DBG("%s %ld", hdev->name, opt);
252
253 /* Driver initialization */
254
255 /* Special commands */
256 while ((skb = skb_dequeue(&hdev->driver_init))) {
257 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
258 skb->dev = (void *) hdev;
259
260 skb_queue_tail(&hdev->cmd_q, skb);
261 queue_work(hdev->workqueue, &hdev->cmd_work);
262 }
263 skb_queue_purge(&hdev->driver_init);
264
265 switch (hdev->dev_type) {
266 case HCI_BREDR:
267 bredr_init(hdev);
268 break;
269
270 case HCI_AMP:
271 amp_init(hdev);
272 break;
273
274 default:
275 BT_ERR("Unknown device type %d", hdev->dev_type);
276 break;
277 }
278
279}
280
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300281static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
282{
283 BT_DBG("%s", hdev->name);
284
285 /* Read LE buffer size */
286 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
287}
288
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 scan = opt;
292
293 BT_DBG("%s %x", hdev->name, scan);
294
295 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200296 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297}
298
299static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 auth = opt;
302
303 BT_DBG("%s %x", hdev->name, auth);
304
305 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
309static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 encrypt = opt;
312
313 BT_DBG("%s %x", hdev->name, encrypt);
314
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200315 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200319static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __le16 policy = cpu_to_le16(opt);
322
Marcel Holtmanna418b892008-11-30 12:17:28 +0100323 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200324
325 /* Default link policy */
326 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
327}
328
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900329/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330 * Device is held on return. */
331struct hci_dev *hci_dev_get(int index)
332{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200333 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334
335 BT_DBG("%d", index);
336
337 if (index < 0)
338 return NULL;
339
340 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200341 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342 if (d->id == index) {
343 hdev = hci_dev_hold(d);
344 break;
345 }
346 }
347 read_unlock(&hci_dev_list_lock);
348 return hdev;
349}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350
351/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200352
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200353bool hci_discovery_active(struct hci_dev *hdev)
354{
355 struct discovery_state *discov = &hdev->discovery;
356
Andre Guedes6fbe1952012-02-03 17:47:58 -0300357 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300358 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300359 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200360 return true;
361
Andre Guedes6fbe1952012-02-03 17:47:58 -0300362 default:
363 return false;
364 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200365}
366
Johan Hedbergff9ef572012-01-04 14:23:45 +0200367void hci_discovery_set_state(struct hci_dev *hdev, int state)
368{
369 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
370
371 if (hdev->discovery.state == state)
372 return;
373
374 switch (state) {
375 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300376 if (hdev->discovery.state != DISCOVERY_STARTING)
377 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200378 break;
379 case DISCOVERY_STARTING:
380 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300381 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200382 mgmt_discovering(hdev, 1);
383 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200384 case DISCOVERY_RESOLVING:
385 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200386 case DISCOVERY_STOPPING:
387 break;
388 }
389
390 hdev->discovery.state = state;
391}
392
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393static void inquiry_cache_flush(struct hci_dev *hdev)
394{
Johan Hedberg30883512012-01-04 14:16:21 +0200395 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200396 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397
Johan Hedberg561aafb2012-01-04 13:31:59 +0200398 list_for_each_entry_safe(p, n, &cache->all, all) {
399 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200400 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200402
403 INIT_LIST_HEAD(&cache->unknown);
404 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405}
406
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300407struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
408 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409{
Johan Hedberg30883512012-01-04 14:16:21 +0200410 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 struct inquiry_entry *e;
412
413 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
414
Johan Hedberg561aafb2012-01-04 13:31:59 +0200415 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200417 return e;
418 }
419
420 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421}
422
Johan Hedberg561aafb2012-01-04 13:31:59 +0200423struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300424 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200425{
Johan Hedberg30883512012-01-04 14:16:21 +0200426 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200427 struct inquiry_entry *e;
428
429 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
430
431 list_for_each_entry(e, &cache->unknown, list) {
432 if (!bacmp(&e->data.bdaddr, bdaddr))
433 return e;
434 }
435
436 return NULL;
437}
438
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200439struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300440 bdaddr_t *bdaddr,
441 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200442{
443 struct discovery_state *cache = &hdev->discovery;
444 struct inquiry_entry *e;
445
446 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
447
448 list_for_each_entry(e, &cache->resolve, list) {
449 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
450 return e;
451 if (!bacmp(&e->data.bdaddr, bdaddr))
452 return e;
453 }
454
455 return NULL;
456}
457
Johan Hedberga3d4e202012-01-09 00:53:02 +0200458void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300459 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200460{
461 struct discovery_state *cache = &hdev->discovery;
462 struct list_head *pos = &cache->resolve;
463 struct inquiry_entry *p;
464
465 list_del(&ie->list);
466
467 list_for_each_entry(p, &cache->resolve, list) {
468 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300469 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200470 break;
471 pos = &p->list;
472 }
473
474 list_add(&ie->list, pos);
475}
476
Johan Hedberg31754052012-01-04 13:39:52 +0200477bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300478 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479{
Johan Hedberg30883512012-01-04 14:16:21 +0200480 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200481 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700482
483 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
484
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200485 if (ssp)
486 *ssp = data->ssp_mode;
487
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200488 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200489 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200490 if (ie->data.ssp_mode && ssp)
491 *ssp = true;
492
Johan Hedberga3d4e202012-01-09 00:53:02 +0200493 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300494 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +0200495 ie->data.rssi = data->rssi;
496 hci_inquiry_cache_update_resolve(hdev, ie);
497 }
498
Johan Hedberg561aafb2012-01-04 13:31:59 +0200499 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200500 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200501
Johan Hedberg561aafb2012-01-04 13:31:59 +0200502 /* Entry not in the cache. Add new one. */
503 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
504 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200505 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200506
507 list_add(&ie->all, &cache->all);
508
509 if (name_known) {
510 ie->name_state = NAME_KNOWN;
511 } else {
512 ie->name_state = NAME_NOT_KNOWN;
513 list_add(&ie->list, &cache->unknown);
514 }
515
516update:
517 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300518 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +0200519 ie->name_state = NAME_KNOWN;
520 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521 }
522
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200523 memcpy(&ie->data, data, sizeof(*data));
524 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700525 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200526
527 if (ie->name_state == NAME_NOT_KNOWN)
528 return false;
529
530 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531}
532
533static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
534{
Johan Hedberg30883512012-01-04 14:16:21 +0200535 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700536 struct inquiry_info *info = (struct inquiry_info *) buf;
537 struct inquiry_entry *e;
538 int copied = 0;
539
Johan Hedberg561aafb2012-01-04 13:31:59 +0200540 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200542
543 if (copied >= num)
544 break;
545
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 bacpy(&info->bdaddr, &data->bdaddr);
547 info->pscan_rep_mode = data->pscan_rep_mode;
548 info->pscan_period_mode = data->pscan_period_mode;
549 info->pscan_mode = data->pscan_mode;
550 memcpy(info->dev_class, data->dev_class, 3);
551 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200552
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200554 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555 }
556
557 BT_DBG("cache %p, copied %d", cache, copied);
558 return copied;
559}
560
561static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
562{
563 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
564 struct hci_cp_inquiry cp;
565
566 BT_DBG("%s", hdev->name);
567
568 if (test_bit(HCI_INQUIRY, &hdev->flags))
569 return;
570
571 /* Start Inquiry */
572 memcpy(&cp.lap, &ir->lap, 3);
573 cp.length = ir->length;
574 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200575 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576}
577
578int hci_inquiry(void __user *arg)
579{
580 __u8 __user *ptr = arg;
581 struct hci_inquiry_req ir;
582 struct hci_dev *hdev;
583 int err = 0, do_inquiry = 0, max_rsp;
584 long timeo;
585 __u8 *buf;
586
587 if (copy_from_user(&ir, ptr, sizeof(ir)))
588 return -EFAULT;
589
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200590 hdev = hci_dev_get(ir.dev_id);
591 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 return -ENODEV;
593
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300594 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900595 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300596 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597 inquiry_cache_flush(hdev);
598 do_inquiry = 1;
599 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300600 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601
Marcel Holtmann04837f62006-07-03 10:02:33 +0200602 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200603
604 if (do_inquiry) {
605 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
606 if (err < 0)
607 goto done;
608 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300610 /* for unlimited number of responses we will use buffer with
611 * 255 entries
612 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
614
615 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
616 * copy it to the user space.
617 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100618 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200619 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 err = -ENOMEM;
621 goto done;
622 }
623
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300624 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700625 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300626 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700627
628 BT_DBG("num_rsp %d", ir.num_rsp);
629
630 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
631 ptr += sizeof(ir);
632 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300633 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700634 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900635 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636 err = -EFAULT;
637
638 kfree(buf);
639
640done:
641 hci_dev_put(hdev);
642 return err;
643}
644
645/* ---- HCI ioctl helpers ---- */
646
647int hci_dev_open(__u16 dev)
648{
649 struct hci_dev *hdev;
650 int ret = 0;
651
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200652 hdev = hci_dev_get(dev);
653 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654 return -ENODEV;
655
656 BT_DBG("%s %p", hdev->name, hdev);
657
658 hci_req_lock(hdev);
659
Johan Hovold94324962012-03-15 14:48:41 +0100660 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
661 ret = -ENODEV;
662 goto done;
663 }
664
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200665 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
666 ret = -ERFKILL;
667 goto done;
668 }
669
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 if (test_bit(HCI_UP, &hdev->flags)) {
671 ret = -EALREADY;
672 goto done;
673 }
674
675 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
676 set_bit(HCI_RAW, &hdev->flags);
677
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200678 /* Treat all non BR/EDR controllers as raw devices if
679 enable_hs is not set */
680 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100681 set_bit(HCI_RAW, &hdev->flags);
682
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683 if (hdev->open(hdev)) {
684 ret = -EIO;
685 goto done;
686 }
687
688 if (!test_bit(HCI_RAW, &hdev->flags)) {
689 atomic_set(&hdev->cmd_cnt, 1);
690 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200691 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300693 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694
Andre Guedeseead27d2011-06-30 19:20:55 -0300695 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300696 ret = __hci_request(hdev, hci_le_init_req, 0,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300697 HCI_INIT_TIMEOUT);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300698
Linus Torvalds1da177e2005-04-16 15:20:36 -0700699 clear_bit(HCI_INIT, &hdev->flags);
700 }
701
702 if (!ret) {
703 hci_dev_hold(hdev);
704 set_bit(HCI_UP, &hdev->flags);
705 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200706 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300707 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200708 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300709 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200710 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900711 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700712 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200713 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200714 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400715 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700716
717 skb_queue_purge(&hdev->cmd_q);
718 skb_queue_purge(&hdev->rx_q);
719
720 if (hdev->flush)
721 hdev->flush(hdev);
722
723 if (hdev->sent_cmd) {
724 kfree_skb(hdev->sent_cmd);
725 hdev->sent_cmd = NULL;
726 }
727
728 hdev->close(hdev);
729 hdev->flags = 0;
730 }
731
732done:
733 hci_req_unlock(hdev);
734 hci_dev_put(hdev);
735 return ret;
736}
737
738static int hci_dev_do_close(struct hci_dev *hdev)
739{
740 BT_DBG("%s %p", hdev->name, hdev);
741
Andre Guedes28b75a82012-02-03 17:48:00 -0300742 cancel_work_sync(&hdev->le_scan);
743
Linus Torvalds1da177e2005-04-16 15:20:36 -0700744 hci_req_cancel(hdev, ENODEV);
745 hci_req_lock(hdev);
746
747 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300748 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700749 hci_req_unlock(hdev);
750 return 0;
751 }
752
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200753 /* Flush RX and TX works */
754 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400755 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700756
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200757 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200758 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200759 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +0200760 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200761 }
762
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200763 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200764 cancel_delayed_work(&hdev->service_cache);
765
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300766 cancel_delayed_work_sync(&hdev->le_scan_disable);
767
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300768 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700769 inquiry_cache_flush(hdev);
770 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300771 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700772
773 hci_notify(hdev, HCI_DEV_DOWN);
774
775 if (hdev->flush)
776 hdev->flush(hdev);
777
778 /* Reset device */
779 skb_queue_purge(&hdev->cmd_q);
780 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200781 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +0200782 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 set_bit(HCI_INIT, &hdev->flags);
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300784 __hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700785 clear_bit(HCI_INIT, &hdev->flags);
786 }
787
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200788 /* flush cmd work */
789 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700790
791 /* Drop queues */
792 skb_queue_purge(&hdev->rx_q);
793 skb_queue_purge(&hdev->cmd_q);
794 skb_queue_purge(&hdev->raw_q);
795
796 /* Drop last sent command */
797 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300798 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700799 kfree_skb(hdev->sent_cmd);
800 hdev->sent_cmd = NULL;
801 }
802
803 /* After this point our queues are empty
804 * and no tasks are scheduled. */
805 hdev->close(hdev);
806
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100807 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
808 hci_dev_lock(hdev);
809 mgmt_powered(hdev, 0);
810 hci_dev_unlock(hdev);
811 }
Johan Hedberg5add6af2010-12-16 10:00:37 +0200812
Linus Torvalds1da177e2005-04-16 15:20:36 -0700813 /* Clear flags */
814 hdev->flags = 0;
815
Johan Hedberge59fda82012-02-22 18:11:53 +0200816 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +0200817 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +0200818
Linus Torvalds1da177e2005-04-16 15:20:36 -0700819 hci_req_unlock(hdev);
820
821 hci_dev_put(hdev);
822 return 0;
823}
824
/* HCIDEVDOWN ioctl backend: power down the HCI device with index @dev.
 *
 * If the device was only up because of the automatic power-on of the
 * management interface (HCI_AUTO_OFF set), clear that flag and cancel
 * the pending delayed power-off work first, so the worker cannot race
 * with this explicit close.
 *
 * Returns 0 on success or -ENODEV if no such device exists; otherwise
 * propagates the result of hci_dev_do_close().
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* This explicit close supersedes any scheduled auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
842
/* HCIDEVRESET ioctl backend: soft-reset a running HCI device.
 *
 * With the request lock held, drop all queued RX/ACL data and pending
 * commands, flush the inquiry cache and connection hash, give the
 * driver a chance to flush its own buffers, and reset the flow-control
 * counters. Unless the device is in raw mode (HCI_RAW), an HCI_Reset
 * command is then sent to the controller.
 *
 * Returns 0 on success (or if the device was not up), -ENODEV if the
 * index is invalid, or the error from the reset request.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Let the driver flush any hardware/transport buffers */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow control: one command credit, no outstanding packets */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
880
881int hci_dev_reset_stat(__u16 dev)
882{
883 struct hci_dev *hdev;
884 int ret = 0;
885
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200886 hdev = hci_dev_get(dev);
887 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700888 return -ENODEV;
889
890 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
891
892 hci_dev_put(hdev);
893
894 return ret;
895}
896
/* Dispatcher for the HCISET* device-configuration ioctls.
 *
 * Copies a struct hci_dev_req from user space and applies the requested
 * setting: the HCISETAUTH/HCISETENCRYPT/HCISETSCAN/HCISETLINKPOL cases
 * run a synchronous HCI request against the controller, while the
 * remaining cases only update host-side fields of struct hci_dev.
 *
 * Returns 0 on success or a negative errno (-EFAULT on bad user
 * pointer, -ENODEV on bad index, -EOPNOTSUPP / -EINVAL as below).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: second halfword is the
		 * MTU, first halfword the packet count */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
971
/* HCIGETDEVLIST ioctl backend: copy the list of registered HCI devices
 * (id + flags per device) to user space.
 *
 * The caller-supplied count is read first and bounded by what fits in
 * two pages; the reply buffer is then sized for that many entries but
 * only the number of devices actually found is copied back.
 *
 * Returns 0 on success, -EFAULT/-EINVAL/-ENOMEM on failure.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the allocation at two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device cancels its pending auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Shrink the copy to the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1018
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info for one
 * device and copy it back to user space.
 *
 * Like hci_get_dev_list(), touching the device from legacy userspace
 * cancels a pending auto power-off (synchronously here) and marks
 * non-mgmt-controlled devices pairable.
 *
 * Returns 0 on success, -EFAULT on bad user pointer, -ENODEV on bad id.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* _sync variant: wait for a running power-off worker to finish */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1060
1061/* ---- Interface to HCI drivers ---- */
1062
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001063static int hci_rfkill_set_block(void *data, bool blocked)
1064{
1065 struct hci_dev *hdev = data;
1066
1067 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1068
1069 if (!blocked)
1070 return 0;
1071
1072 hci_dev_do_close(hdev);
1073
1074 return 0;
1075}
1076
/* rfkill callbacks: we only react to "block" by closing the device */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1080
/* Deferred power-on worker (hdev->power_on).
 *
 * Opens the device; on success, if the power-on was automatic
 * (HCI_AUTO_OFF) an auto power-off is scheduled, and if this was the
 * initial setup phase (HCI_SETUP) the management interface is told a
 * new controller index exists.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off,
				      msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1097
/* Delayed power-off worker (hdev->power_off.work): simply closes the
 * device. Scheduled by hci_power_on() when the device was brought up
 * automatically and nobody claimed it within AUTO_OFF_TIMEOUT.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1107
/* Delayed discoverable-timeout worker (hdev->discov_off.work).
 *
 * When the discoverable period expires, write Scan_Enable back to
 * page-scan only (SCAN_PAGE, i.e. connectable but not discoverable)
 * and clear the stored timeout under hdev->lock.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1125
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001126int hci_uuids_clear(struct hci_dev *hdev)
1127{
1128 struct list_head *p, *n;
1129
1130 list_for_each_safe(p, n, &hdev->uuids) {
1131 struct bt_uuid *uuid;
1132
1133 uuid = list_entry(p, struct bt_uuid, list);
1134
1135 list_del(p);
1136 kfree(uuid);
1137 }
1138
1139 return 0;
1140}
1141
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001142int hci_link_keys_clear(struct hci_dev *hdev)
1143{
1144 struct list_head *p, *n;
1145
1146 list_for_each_safe(p, n, &hdev->link_keys) {
1147 struct link_key *key;
1148
1149 key = list_entry(p, struct link_key, list);
1150
1151 list_del(p);
1152 kfree(key);
1153 }
1154
1155 return 0;
1156}
1157
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001158int hci_smp_ltks_clear(struct hci_dev *hdev)
1159{
1160 struct smp_ltk *k, *tmp;
1161
1162 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1163 list_del(&k->list);
1164 kfree(k);
1165 }
1166
1167 return 0;
1168}
1169
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001170struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1171{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001172 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001173
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001174 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001175 if (bacmp(bdaddr, &k->bdaddr) == 0)
1176 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001177
1178 return NULL;
1179}
1180
/* Decide whether a freshly created link key should be stored
 * persistently (reported as such to the management interface).
 *
 * The checks form an ordered decision ladder — each rule only applies
 * when all earlier ones did not match — so their order must not be
 * changed. @conn may be NULL (security mode 3 pairing); @old_key_type
 * is 0xff when no previous key existed.
 *
 * Returns true if the key should persist, false for ephemeral keys.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1216
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001217struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001218{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001219 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001220
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001221 list_for_each_entry(k, &hdev->long_term_keys, list) {
1222 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001223 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001224 continue;
1225
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001226 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001227 }
1228
1229 return NULL;
1230}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001231
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001232struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001233 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001234{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001236
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001237 list_for_each_entry(k, &hdev->long_term_keys, list)
1238 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001239 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001240 return k;
1241
1242 return NULL;
1243}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001244
/* Store (or update) a BR/EDR link key received from the controller.
 *
 * Reuses an existing entry for @bdaddr if present; otherwise allocates
 * a new one (GFP_ATOMIC — may run from non-sleeping context). When
 * @new_key is set, the management interface is notified with the
 * persistence verdict of hci_persistent_key(), and the connection is
 * marked so non-persistent keys get flushed on disconnect.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff = "no previous key" sentinel for hci_persistent_key */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type on record */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1297
/* Store (or update) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK types are accepted; other types are ignored with
 * return 0. An existing entry for the address is overwritten in place,
 * otherwise a new one is allocated (GFP_ATOMIC). When @new_key is set
 * and the key is a long term key, the management interface is told it
 * should be stored persistently.
 *
 * Returns 0 on success (or ignored type) or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only true LTKs (not short term keys) are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1334
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001335int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1336{
1337 struct link_key *key;
1338
1339 key = hci_find_link_key(hdev, bdaddr);
1340 if (!key)
1341 return -ENOENT;
1342
1343 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1344
1345 list_del(&key->list);
1346 kfree(key);
1347
1348 return 0;
1349}
1350
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001351int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1352{
1353 struct smp_ltk *k, *tmp;
1354
1355 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1356 if (bacmp(bdaddr, &k->bdaddr))
1357 continue;
1358
1359 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1360
1361 list_del(&k->list);
1362 kfree(k);
1363 }
1364
1365 return 0;
1366}
1367
/* HCI command timer function
 *
 * Fires when the controller failed to answer the last command within
 * the timeout. Logs the opcode of the outstanding command (if any),
 * then restores the single command credit and kicks the command worker
 * so queued commands are not stalled forever.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1385
Szymon Janc2763eda2011-03-22 13:12:22 +01001386struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001387 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001388{
1389 struct oob_data *data;
1390
1391 list_for_each_entry(data, &hdev->remote_oob_data, list)
1392 if (bacmp(bdaddr, &data->bdaddr) == 0)
1393 return data;
1394
1395 return NULL;
1396}
1397
1398int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1399{
1400 struct oob_data *data;
1401
1402 data = hci_find_remote_oob_data(hdev, bdaddr);
1403 if (!data)
1404 return -ENOENT;
1405
1406 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1407
1408 list_del(&data->list);
1409 kfree(data);
1410
1411 return 0;
1412}
1413
1414int hci_remote_oob_data_clear(struct hci_dev *hdev)
1415{
1416 struct oob_data *data, *n;
1417
1418 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1419 list_del(&data->list);
1420 kfree(data);
1421 }
1422
1423 return 0;
1424}
1425
1426int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001427 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001428{
1429 struct oob_data *data;
1430
1431 data = hci_find_remote_oob_data(hdev, bdaddr);
1432
1433 if (!data) {
1434 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1435 if (!data)
1436 return -ENOMEM;
1437
1438 bacpy(&data->bdaddr, bdaddr);
1439 list_add(&data->list, &hdev->remote_oob_data);
1440 }
1441
1442 memcpy(data->hash, hash, sizeof(data->hash));
1443 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1444
1445 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1446
1447 return 0;
1448}
1449
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001450struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001451{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001452 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001453
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001454 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001455 if (bacmp(bdaddr, &b->bdaddr) == 0)
1456 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001457
1458 return NULL;
1459}
1460
1461int hci_blacklist_clear(struct hci_dev *hdev)
1462{
1463 struct list_head *p, *n;
1464
1465 list_for_each_safe(p, n, &hdev->blacklist) {
1466 struct bdaddr_list *b;
1467
1468 b = list_entry(p, struct bdaddr_list, list);
1469
1470 list_del(p);
1471 kfree(b);
1472 }
1473
1474 return 0;
1475}
1476
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001477int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001478{
1479 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001480
1481 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1482 return -EBADF;
1483
Antti Julku5e762442011-08-25 16:48:02 +03001484 if (hci_blacklist_lookup(hdev, bdaddr))
1485 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001486
1487 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001488 if (!entry)
1489 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001490
1491 bacpy(&entry->bdaddr, bdaddr);
1492
1493 list_add(&entry->list, &hdev->blacklist);
1494
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001495 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001496}
1497
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001498int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001499{
1500 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001501
Szymon Janc1ec918c2011-11-16 09:32:21 +01001502 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001503 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001504
1505 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001506 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001507 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001508
1509 list_del(&entry->list);
1510 kfree(entry);
1511
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001512 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001513}
1514
/* __hci_request callback: send LE Set Scan Parameters.
 * @opt carries a struct le_scan_params pointer; interval and window
 * are converted to the little-endian wire format, remaining command
 * fields stay zeroed by the memset.
 */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1527
/* __hci_request callback: send LE Set Scan Enable with scanning on and
 * duplicate filtering enabled (@opt is unused).
 */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1538
/* Synchronously start an LE scan.
 *
 * Under the request lock, issues Set Scan Parameters followed by Set
 * Scan Enable (each with a fixed 3 s request timeout), then schedules
 * the delayed worker that will stop the scan after @timeout ms.
 *
 * Returns 0 on success, -EINPROGRESS if a scan is already active, or
 * the error from the failed request. Sleeps; must not be called from
 * atomic context (runs from le_scan_work()).
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* param lives on this stack frame only for the duration of the
	 * synchronous request */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1572
/* Abort an active LE scan.
 *
 * Returns -EALREADY when no scan is running. If the delayed
 * auto-disable work was still pending (the scan window had not yet
 * expired), cancel it and send Set Scan Enable with an all-zero
 * parameter block to stop scanning ourselves.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1590
/* Delayed worker (hdev->le_scan_disable.work) that stops an LE scan
 * when its timeout expires: sends Set Scan Enable with all-zero
 * parameters (enable = 0).
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1603
Andre Guedes28b75a82012-02-03 17:48:00 -03001604static void le_scan_work(struct work_struct *work)
1605{
1606 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1607 struct le_scan_params *param = &hdev->le_scan_params;
1608
1609 BT_DBG("%s", hdev->name);
1610
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001611 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1612 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001613}
1614
1615int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001616 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001617{
1618 struct le_scan_params *param = &hdev->le_scan_params;
1619
1620 BT_DBG("%s", hdev->name);
1621
1622 if (work_busy(&hdev->le_scan))
1623 return -EINPROGRESS;
1624
1625 param->type = type;
1626 param->interval = interval;
1627 param->window = window;
1628 param->timeout = timeout;
1629
1630 queue_work(system_long_wq, &hdev->le_scan);
1631
1632 return 0;
1633}
1634
David Herrmann9be0dab2012-04-22 14:39:57 +02001635/* Alloc HCI device */
1636struct hci_dev *hci_alloc_dev(void)
1637{
1638 struct hci_dev *hdev;
1639
1640 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1641 if (!hdev)
1642 return NULL;
1643
David Herrmannb1b813d2012-04-22 14:39:58 +02001644 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1645 hdev->esco_type = (ESCO_HV1);
1646 hdev->link_mode = (HCI_LM_ACCEPT);
1647 hdev->io_capability = 0x03; /* No Input No Output */
1648
David Herrmannb1b813d2012-04-22 14:39:58 +02001649 hdev->sniff_max_interval = 800;
1650 hdev->sniff_min_interval = 80;
1651
1652 mutex_init(&hdev->lock);
1653 mutex_init(&hdev->req_lock);
1654
1655 INIT_LIST_HEAD(&hdev->mgmt_pending);
1656 INIT_LIST_HEAD(&hdev->blacklist);
1657 INIT_LIST_HEAD(&hdev->uuids);
1658 INIT_LIST_HEAD(&hdev->link_keys);
1659 INIT_LIST_HEAD(&hdev->long_term_keys);
1660 INIT_LIST_HEAD(&hdev->remote_oob_data);
David Herrmannb1b813d2012-04-22 14:39:58 +02001661
1662 INIT_WORK(&hdev->rx_work, hci_rx_work);
1663 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1664 INIT_WORK(&hdev->tx_work, hci_tx_work);
1665 INIT_WORK(&hdev->power_on, hci_power_on);
1666 INIT_WORK(&hdev->le_scan, le_scan_work);
1667
David Herrmannb1b813d2012-04-22 14:39:58 +02001668 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1669 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1670 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1671
David Herrmann9be0dab2012-04-22 14:39:57 +02001672 skb_queue_head_init(&hdev->driver_init);
David Herrmannb1b813d2012-04-22 14:39:58 +02001673 skb_queue_head_init(&hdev->rx_q);
1674 skb_queue_head_init(&hdev->cmd_q);
1675 skb_queue_head_init(&hdev->raw_q);
1676
1677 init_waitqueue_head(&hdev->req_wait_q);
1678
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001679 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02001680
David Herrmannb1b813d2012-04-22 14:39:58 +02001681 hci_init_sysfs(hdev);
1682 discovery_init(hdev);
1683 hci_conn_hash_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02001684
1685 return hdev;
1686}
1687EXPORT_SYMBOL(hci_alloc_dev);
1688
/* Free HCI device */
/* Release a device obtained from hci_alloc_dev(). Any driver init
 * packets that were never consumed are dropped; the structure itself
 * is freed by the device release callback once the last reference to
 * hdev->dev is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1698
/* Register HCI device */
/* Make an allocated hci_dev visible to the stack: assign an index,
 * add it to the global device list, create its workqueue, sysfs
 * entries and rfkill switch, then schedule the initial power-on.
 *
 * Returns the assigned device id (>= 0) on success or a negative
 * errno. On failure all partially acquired resources are unwound via
 * the goto chain at the bottom.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A transport driver without open/close callbacks is unusable */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Per-device single-threaded workqueue for RX/TX/cmd work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failure is not fatal,
	 * the device simply loses its kill switch.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1774
/* Unregister HCI device */
/* Tear down a registered device: unlink it from the global list,
 * close it, notify mgmt, remove rfkill/sysfs, free all persistent
 * key/UUID/blacklist state and finally drop the reference taken at
 * registration time and return the index to the IDA.
 *
 * The teardown order matters: the HCI_UNREGISTER flag and the list
 * removal come first so no new users can find the device while it is
 * being dismantled.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Free persistent per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1830
/* Suspend HCI device */
/* Broadcast a suspend notification to registered listeners. No
 * device state is changed here; always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1838
/* Resume HCI device */
/* Broadcast a resume notification to registered listeners. No
 * device state is changed here; always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1846
Marcel Holtmann76bca882009-11-18 00:40:39 +01001847/* Receive frame from HCI drivers */
1848int hci_recv_frame(struct sk_buff *skb)
1849{
1850 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1851 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001852 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001853 kfree_skb(skb);
1854 return -ENXIO;
1855 }
1856
1857 /* Incomming skb */
1858 bt_cb(skb)->incoming = 1;
1859
1860 /* Time stamp */
1861 __net_timestamp(skb);
1862
Marcel Holtmann76bca882009-11-18 00:40:39 +01001863 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001864 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001865
Marcel Holtmann76bca882009-11-18 00:40:39 +01001866 return 0;
1867}
1868EXPORT_SYMBOL(hci_recv_frame);
1869
/* Feed up to @count bytes of raw HCI data into the reassembly buffer
 * selected by @index, allocating the buffer on first use.
 *
 * Returns the number of input bytes left unconsumed (>= 0), or a
 * negative error: -EILSEQ for an invalid packet type or index,
 * -ENOMEM when the buffer cannot be allocated or the announced
 * payload would not fit. A fully reassembled frame is handed to
 * hci_recv_frame() and the slot is cleared for the next frame.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No partial frame yet: size the buffer for the largest
		 * frame of this type and expect the header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed to
		 * complete the current header or payload.
		 */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and expect that many more bytes.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1978
Marcel Holtmannef222012007-07-11 06:42:04 +02001979int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1980{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301981 int rem = 0;
1982
Marcel Holtmannef222012007-07-11 06:42:04 +02001983 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1984 return -EILSEQ;
1985
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001986 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001987 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301988 if (rem < 0)
1989 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001990
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301991 data += (count - rem);
1992 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001993 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001994
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301995 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001996}
1997EXPORT_SYMBOL(hci_recv_fragment);
1998
/* Reassembly slot reserved for byte-stream transports (e.g. UART),
 * where all packet types share one in-flight frame.
 */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes into the shared reassembly slot. At a frame
 * boundary the first byte of the stream is the H4-style packet type
 * indicator; mid-frame the type is taken from the pending skb.
 * Returns the final leftover count from hci_reassembly() (>= 0) or a
 * negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* The type byte itself is not frame payload */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2033
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034/* ---- Interface to upper protocols ---- */
2035
/* Register an upper-protocol callback structure on the global HCI
 * callback list. Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2047
/* Remove a previously registered upper-protocol callback structure
 * from the global HCI callback list. Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2059
2060static int hci_send_frame(struct sk_buff *skb)
2061{
2062 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2063
2064 if (!hdev) {
2065 kfree_skb(skb);
2066 return -ENODEV;
2067 }
2068
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002069 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002071 /* Time stamp */
2072 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002074 /* Send copy to monitor */
2075 hci_send_to_monitor(hdev, skb);
2076
2077 if (atomic_read(&hdev->promisc)) {
2078 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002079 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002080 }
2081
2082 /* Get rid of skb owner, prior to sending to the driver. */
2083 skb_orphan(skb);
2084
2085 return hdev->send(skb);
2086}
2087
/* Send HCI command */
/* Build an HCI command packet (header + @plen parameter bytes copied
 * from @param) and queue it on the command queue; the command worker
 * performs the actual transmission. Returns 0 on success or -ENOMEM
 * when the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header: little-endian opcode plus parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During init, remember the last command for the setup logic */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123
2124/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002125void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126{
2127 struct hci_command_hdr *hdr;
2128
2129 if (!hdev->sent_cmd)
2130 return NULL;
2131
2132 hdr = (void *) hdev->sent_cmd->data;
2133
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002134 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135 return NULL;
2136
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002137 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138
2139 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2140}
2141
/* Send ACL data */
/* Prepend an ACL data header to @skb: handle plus packet-boundary
 * flags packed into one le16, followed by the little-endian payload
 * length. The transport header is reset to point at the new header.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2154
/* Add ACL headers to @skb (and any fragments hanging off its
 * frag_list) and append everything to @queue.
 *
 * The first fragment keeps the caller's @flags; the rest are rewritten
 * to continuation fragments (ACL_CONT). When fragments are present the
 * whole chain is appended under the queue lock so the TX scheduler
 * never sees a partial frame.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict skb accounting to the head fragment only */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; each fragment is queued separately */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All remaining fragments are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2201
/* Queue ACL data on a channel's data queue and kick the TX worker.
 * Fragmentation/header handling is delegated to hci_queue_acl().
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215
/* Send SCO data */
/* Prepend a SCO header (handle + payload length) to @skb, queue it on
 * the connection's data queue and kick the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the header before pushing room for it onto the skb */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002237
2238/* ---- HCI TX task (outgoing data) ---- */
2239
/* HCI Connection scheduler */
/* Pick the connection of link @type with queued data that has the
 * fewest packets in flight (c->sent), and compute its send quota from
 * the controller's free buffer count for that link type.
 *
 * Returns the chosen connection (or NULL when nothing is ready) and
 * stores the quota in *quote (at least 1 when a connection is found,
 * 0 otherwise).
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* le_mtu == 0 means LE shares the ACL buffers */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the buffers evenly; always allow at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2300
/* Handle a transmit timeout on links of the given @type: disconnect
 * every connection of that type that still has unacknowledged packets
 * (c->sent), on the assumption that the link has stalled.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2321
/* Channel-level scheduler: among all channels on connections of link
 * @type, consider only channels whose head skb has the highest
 * priority currently queued, and among those pick the one on the
 * connection with the fewest packets in flight.
 *
 * Returns the chosen channel (or NULL) and stores the send quota,
 * derived from the controller's free buffer count, in *quote.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority resets the fairness search */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priority, prefer the least-busy
			 * connection.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffers for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* le_mtu == 0 means LE shares the ACL buffers */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split the buffers evenly; always allow at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2400
/* Anti-starvation pass: after a scheduling round, promote the head
 * skb of every channel that was NOT serviced (chan->sent == 0 stays,
 * serviced channels get their counter reset instead) to priority
 * HCI_PRIO_MAX - 1, so low-priority traffic eventually wins a slot.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel was serviced this round: reset and skip */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2450
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002451static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2452{
2453 /* Calculate count of blocks used by this packet */
2454 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2455}
2456
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002457static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002458{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 if (!test_bit(HCI_RAW, &hdev->flags)) {
2460 /* ACL tx timeout must be longer than maximum
2461 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002462 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002463 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002464 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002465 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002466}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467
/* Packet-based ACL scheduler: repeatedly pick the best channel via
 * hci_chan_sent() and transmit frames from it until either the
 * controller buffer budget (acl_cnt) or the channel's quota runs out.
 * If anything was sent, rebalance channel priorities afterwards.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Only drain frames of the priority seen at the head of
		 * the queue; a lower-priority frame ends this burst. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* NOTE(review): presumably wakes the link out of
			 * sniff mode before sending -- implementation is
			 * outside this chunk. */
			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Account the packet against the controller budget
			 * and both per-channel and per-connection counters. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted: let starved channels catch up. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2505
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002506static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002507{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002508 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002509 struct hci_chan *chan;
2510 struct sk_buff *skb;
2511 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002512
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002513 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002514
2515 while (hdev->block_cnt > 0 &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002516 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002517 u32 priority = (skb_peek(&chan->data_q))->priority;
2518 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2519 int blocks;
2520
2521 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002522 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002523
2524 /* Stop if priority has changed */
2525 if (skb->priority < priority)
2526 break;
2527
2528 skb = skb_dequeue(&chan->data_q);
2529
2530 blocks = __get_blocks(hdev, skb);
2531 if (blocks > hdev->block_cnt)
2532 return;
2533
2534 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002535 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002536
2537 hci_send_frame(skb);
2538 hdev->acl_last_tx = jiffies;
2539
2540 hdev->block_cnt -= blocks;
2541 quote -= blocks;
2542
2543 chan->sent += blocks;
2544 chan->conn->sent += blocks;
2545 }
2546 }
2547
2548 if (cnt != hdev->block_cnt)
2549 hci_prio_recalculate(hdev, ACL_LINK);
2550}
2551
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002552static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002553{
2554 BT_DBG("%s", hdev->name);
2555
2556 if (!hci_conn_num(hdev, ACL_LINK))
2557 return;
2558
2559 switch (hdev->flow_ctl_mode) {
2560 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2561 hci_sched_acl_pkt(hdev);
2562 break;
2563
2564 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2565 hci_sched_acl_blk(hdev);
2566 break;
2567 }
2568}
2569
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002571static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002572{
2573 struct hci_conn *conn;
2574 struct sk_buff *skb;
2575 int quote;
2576
2577 BT_DBG("%s", hdev->name);
2578
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002579 if (!hci_conn_num(hdev, SCO_LINK))
2580 return;
2581
Linus Torvalds1da177e2005-04-16 15:20:36 -07002582 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2583 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2584 BT_DBG("skb %p len %d", skb, skb->len);
2585 hci_send_frame(skb);
2586
2587 conn->sent++;
2588 if (conn->sent == ~0)
2589 conn->sent = 0;
2590 }
2591 }
2592}
2593
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002594static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002595{
2596 struct hci_conn *conn;
2597 struct sk_buff *skb;
2598 int quote;
2599
2600 BT_DBG("%s", hdev->name);
2601
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002602 if (!hci_conn_num(hdev, ESCO_LINK))
2603 return;
2604
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002605 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2606 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002607 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2608 BT_DBG("skb %p len %d", skb, skb->len);
2609 hci_send_frame(skb);
2610
2611 conn->sent++;
2612 if (conn->sent == ~0)
2613 conn->sent = 0;
2614 }
2615 }
2616}
2617
/* LE scheduler: mirrors hci_sched_acl_pkt, but draws from the dedicated
 * LE buffer pool when the controller advertises one (le_pkts != 0) and
 * falls back to sharing the ACL pool otherwise.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the buffer budget: dedicated LE pool if present,
	 * otherwise the shared ACL pool. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Only drain frames of the priority at the queue head;
		 * a lower-priority frame ends this burst. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was transmitted: let starved channels catch up. */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2668
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002669static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002671 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672 struct sk_buff *skb;
2673
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002674 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002675 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676
2677 /* Schedule queues and send stuff to HCI driver */
2678
2679 hci_sched_acl(hdev);
2680
2681 hci_sched_sco(hdev);
2682
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002683 hci_sched_esco(hdev);
2684
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002685 hci_sched_le(hdev);
2686
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 /* Send next queued raw (unknown type) packet */
2688 while ((skb = skb_dequeue(&hdev->raw_q)))
2689 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690}
2691
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002692/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693
/* ACL data packet: resolve the connection handle and hand the payload
 * up to L2CAP.  Packets for unknown handles are logged and dropped. */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field also carries the packet boundary and
	 * broadcast flag bits; split them apart. */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* First data on a connection mgmt has not been told about
		 * yet: announce it exactly once (flag set-and-test). */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2737
2738/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002739static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002740{
2741 struct hci_sco_hdr *hdr = (void *) skb->data;
2742 struct hci_conn *conn;
2743 __u16 handle;
2744
2745 skb_pull(skb, HCI_SCO_HDR_SIZE);
2746
2747 handle = __le16_to_cpu(hdr->handle);
2748
2749 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2750
2751 hdev->stat.sco_rx++;
2752
2753 hci_dev_lock(hdev);
2754 conn = hci_conn_hash_lookup_handle(hdev, handle);
2755 hci_dev_unlock(hdev);
2756
2757 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002759 sco_recv_scodata(conn, skb);
2760 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002762 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002763 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 }
2765
2766 kfree_skb(skb);
2767}
2768
/* RX work handler: drain hdev->rx_q, copying each packet to the monitor
 * (and to promiscuous sockets), then dispatching it to the handler for
 * its packet type.  Raw devices and data packets during init are dropped
 * in the kernel. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw device: userspace owns the HCI traffic; nothing
		 * more for the kernel to process. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it. */
			kfree_skb(skb);
			break;
		}
	}
}
2823
/* CMD work handler: send the next queued HCI command once the controller
 * has command credit (cmd_cnt), keeping a clone in hdev->sent_cmd and
 * arming the command timeout. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously saved command before replacing it.
		 * NOTE(review): sent_cmd is presumably consumed when the
		 * matching command complete/status event arrives -- the
		 * consumer is outside this chunk. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* While a reset is pending, no command timeout
			 * is armed. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed (out of memory): put the command
			 * back at the head of the queue and retry later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002854
2855int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2856{
2857 /* General inquiry access code (GIAC) */
2858 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2859 struct hci_cp_inquiry cp;
2860
2861 BT_DBG("%s", hdev->name);
2862
2863 if (test_bit(HCI_INQUIRY, &hdev->flags))
2864 return -EINPROGRESS;
2865
Johan Hedberg46632622012-01-02 16:06:08 +02002866 inquiry_cache_flush(hdev);
2867
Andre Guedes2519a1f2011-11-07 11:45:24 -03002868 memset(&cp, 0, sizeof(cp));
2869 memcpy(&cp.lap, lap, sizeof(cp.lap));
2870 cp.length = length;
2871
2872 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2873}
Andre Guedes023d50492011-11-04 14:16:52 -03002874
2875int hci_cancel_inquiry(struct hci_dev *hdev)
2876{
2877 BT_DBG("%s", hdev->name);
2878
2879 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002880 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002881
2882 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2883}
Andre Guedes31f79562012-04-24 21:02:53 -03002884
2885u8 bdaddr_to_le(u8 bdaddr_type)
2886{
2887 switch (bdaddr_type) {
2888 case BDADDR_LE_PUBLIC:
2889 return ADDR_LE_DEV_PUBLIC;
2890
2891 default:
2892 /* Fallback to LE Random address type */
2893 return ADDR_LE_DEV_RANDOM;
2894 }
2895}