blob: 8a0ce706aebd624ae7fd1c50b9670780dc4f6761 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
Sasha Levin3df92b32012-05-27 22:36:56 +020048/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Forward a device event (e.g. HCI_DEV_UP / HCI_DEV_DOWN) to the HCI
 * socket layer so that listening sockets are notified of device state
 * changes. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Called from the event path when a command completes.
 *
 * During the HCI_INIT phase only the command recorded in
 * hdev->init_last_cmd is allowed to finish the pending request; any
 * other completion is either ignored or triggers a resend (see the
 * CSR workaround below).  Outside of init, a pending synchronous
 * request (__hci_request) is completed and its waiter woken. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only a spontaneous reset-complete (cmd == HCI_OP_RESET
		 * while the last sent command was NOT a reset) triggers
		 * the resend; every other mismatch is simply ignored. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a copy of the last sent command at the head of
		 * the command queue; GFP_ATOMIC because we may be in
		 * non-sleeping context. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Complete the synchronous request, if one is waiting. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
/* Run the request callback @req and sleep until hci_req_complete() /
 * hci_req_cancel() wakes us or @timeout (in jiffies) expires.
 *
 * Caller must hold the request lock (hci_req_lock).  Returns 0 on
 * success, a negative errno translated from the controller status on
 * completion, the cancel error, -ETIMEDOUT on timeout, or -EINTR if a
 * signal interrupted the wait. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue and mark the task state
	 * *before* issuing the request, so a completion that arrives
	 * immediately cannot be lost before schedule_timeout(). */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* Interrupted by a signal; req_status is left for the caller's
	 * teardown path to deal with. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code -> negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno from hci_req_cancel() */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
152
Gustavo Padovan6039aa732012-05-23 04:04:18 -0300153static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156{
157 int ret;
158
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
/* Request callback: issue an HCI_Reset to the controller.  The
 * HCI_RESET flag is set so the event path knows a reset is in flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
178
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200179static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200181 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800182 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200183 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200185 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
186
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187 /* Mandatory initialization */
188
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200190 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200192 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200194
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200196 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200199 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
200
201 /* Read Class of Device */
202 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
203
204 /* Read Local Name */
205 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206
207 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200208 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210 /* Optional initialization */
211
212 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200213 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200214 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 /* Connection accept timeout ~20 secs */
Andrei Emeltchenko82781e62012-05-25 11:38:27 +0300217 param = __constant_cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200218 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200219
220 bacpy(&cp.bdaddr, BDADDR_ANY);
221 cp.delete_all = 1;
222 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223}
224
/* Queue the init commands for an AMP (alternate MAC/PHY) controller. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block-based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
238
239static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
240{
241 struct sk_buff *skb;
242
243 BT_DBG("%s %ld", hdev->name, opt);
244
245 /* Driver initialization */
246
247 /* Special commands */
248 while ((skb = skb_dequeue(&hdev->driver_init))) {
249 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
250 skb->dev = (void *) hdev;
251
252 skb_queue_tail(&hdev->cmd_q, skb);
253 queue_work(hdev->workqueue, &hdev->cmd_work);
254 }
255 skb_queue_purge(&hdev->driver_init);
256
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300257 /* Reset */
258 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
259 hci_reset_req(hdev, 0);
260
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200261 switch (hdev->dev_type) {
262 case HCI_BREDR:
263 bredr_init(hdev);
264 break;
265
266 case HCI_AMP:
267 amp_init(hdev);
268 break;
269
270 default:
271 BT_ERR("Unknown device type %d", hdev->dev_type);
272 break;
273 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200274}
275
/* Request callback for LE-specific initialization. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
283
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
285{
286 __u8 scan = opt;
287
288 BT_DBG("%s %x", hdev->name, scan);
289
290 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200291 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700292}
293
294static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
295{
296 __u8 auth = opt;
297
298 BT_DBG("%s %x", hdev->name, auth);
299
300 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200301 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302}
303
304static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
305{
306 __u8 encrypt = opt;
307
308 BT_DBG("%s %x", hdev->name, encrypt);
309
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200310 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200311 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312}
313
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200314static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
315{
316 __le16 policy = cpu_to_le16(opt);
317
Marcel Holtmanna418b892008-11-30 12:17:28 +0100318 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200319
320 /* Default link policy */
321 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
322}
323
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900324/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325 * Device is held on return. */
326struct hci_dev *hci_dev_get(int index)
327{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200328 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329
330 BT_DBG("%d", index);
331
332 if (index < 0)
333 return NULL;
334
335 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200336 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337 if (d->id == index) {
338 hdev = hci_dev_hold(d);
339 break;
340 }
341 }
342 read_unlock(&hci_dev_list_lock);
343 return hdev;
344}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345
346/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200347
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200348bool hci_discovery_active(struct hci_dev *hdev)
349{
350 struct discovery_state *discov = &hdev->discovery;
351
Andre Guedes6fbe1952012-02-03 17:47:58 -0300352 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300353 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300354 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200355 return true;
356
Andre Guedes6fbe1952012-02-03 17:47:58 -0300357 default:
358 return false;
359 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200360}
361
Johan Hedbergff9ef572012-01-04 14:23:45 +0200362void hci_discovery_set_state(struct hci_dev *hdev, int state)
363{
364 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
365
366 if (hdev->discovery.state == state)
367 return;
368
369 switch (state) {
370 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300371 if (hdev->discovery.state != DISCOVERY_STARTING)
372 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200373 break;
374 case DISCOVERY_STARTING:
375 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300376 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200377 mgmt_discovering(hdev, 1);
378 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200379 case DISCOVERY_RESOLVING:
380 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200381 case DISCOVERY_STOPPING:
382 break;
383 }
384
385 hdev->discovery.state = state;
386}
387
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388static void inquiry_cache_flush(struct hci_dev *hdev)
389{
Johan Hedberg30883512012-01-04 14:16:21 +0200390 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200391 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392
Johan Hedberg561aafb2012-01-04 13:31:59 +0200393 list_for_each_entry_safe(p, n, &cache->all, all) {
394 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200395 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200397
398 INIT_LIST_HEAD(&cache->unknown);
399 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400}
401
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300402struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
403 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404{
Johan Hedberg30883512012-01-04 14:16:21 +0200405 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406 struct inquiry_entry *e;
407
408 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
409
Johan Hedberg561aafb2012-01-04 13:31:59 +0200410 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200412 return e;
413 }
414
415 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416}
417
Johan Hedberg561aafb2012-01-04 13:31:59 +0200418struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300419 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200420{
Johan Hedberg30883512012-01-04 14:16:21 +0200421 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200422 struct inquiry_entry *e;
423
424 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
425
426 list_for_each_entry(e, &cache->unknown, list) {
427 if (!bacmp(&e->data.bdaddr, bdaddr))
428 return e;
429 }
430
431 return NULL;
432}
433
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200434struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300435 bdaddr_t *bdaddr,
436 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200437{
438 struct discovery_state *cache = &hdev->discovery;
439 struct inquiry_entry *e;
440
441 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
442
443 list_for_each_entry(e, &cache->resolve, list) {
444 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
445 return e;
446 if (!bacmp(&e->data.bdaddr, bdaddr))
447 return e;
448 }
449
450 return NULL;
451}
452
/* Re-insert @ie into the resolve list, keeping the list ordered so
 * that entries with pending name requests come first, followed by the
 * remaining entries sorted by decreasing signal strength (|rssi|).
 * Caller must hold the hdev lock. */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the insertion point: skip NAME_PENDING entries and any
	 * entry with a stronger (or equal) signal than @ie. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
471
/* Insert or refresh an inquiry result in the cache.
 *
 * @data:       the inquiry result to store (copied into the entry)
 * @name_known: whether the remote name is already known
 * @ssp:        out parameter; set to the device's SSP mode (sticky true
 *              if a previous result reported SSP support); may be NULL
 *
 * Returns true if the entry's name is known (no name resolution
 * needed), false if the name is still unknown or allocation failed.
 * Caller must hold the hdev lock. */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support seen earlier sticks even if this result
		 * does not report it */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Keep the resolve list ordered by signal strength when
		 * the RSSI of a to-be-resolved entry changes */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known: promote the entry and drop it from the
	 * unknown/resolve list (unless a request is already pending) */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
527
528static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
529{
Johan Hedberg30883512012-01-04 14:16:21 +0200530 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531 struct inquiry_info *info = (struct inquiry_info *) buf;
532 struct inquiry_entry *e;
533 int copied = 0;
534
Johan Hedberg561aafb2012-01-04 13:31:59 +0200535 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700536 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200537
538 if (copied >= num)
539 break;
540
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541 bacpy(&info->bdaddr, &data->bdaddr);
542 info->pscan_rep_mode = data->pscan_rep_mode;
543 info->pscan_period_mode = data->pscan_period_mode;
544 info->pscan_mode = data->pscan_mode;
545 memcpy(info->dev_class, data->dev_class, 3);
546 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200547
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200549 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 }
551
552 BT_DBG("cache %p, copied %d", cache, copied);
553 return copied;
554}
555
556static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
557{
558 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
559 struct hci_cp_inquiry cp;
560
561 BT_DBG("%s", hdev->name);
562
563 if (test_bit(HCI_INQUIRY, &hdev->flags))
564 return;
565
566 /* Start Inquiry */
567 memcpy(&cp.lap, &ir->lap, 3);
568 cp.length = ir->length;
569 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200570 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700571}
572
/* HCIINQUIRY ioctl handler: perform (or reuse) an inquiry and copy the
 * results back to user space.
 *
 * @arg points to a struct hci_inquiry_req followed by space for the
 * result array.  Returns 0 on success or a negative errno
 * (-EFAULT/-ENODEV/-ENOMEM or a request error). */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only run a fresh inquiry when the cache is stale, empty, or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; budget ~2s per unit for the wait */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy the (updated) request header back, then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
639
640/* ---- HCI ioctl helpers ---- */
641
/* Bring up the HCI device with the given index: open the transport,
 * run the controller init sequence (unless raw), and announce the
 * device as up.  On init failure everything is torn down again.
 *
 * Returns 0 on success or a negative errno
 * (-ENODEV/-ERFKILL/-EALREADY/-EIO or an init request error). */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered; refuse to open */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Radio is soft/hard blocked via rfkill */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the transport (driver callback) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Run controller initialization unless this is a raw device */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		/* Success: hold a ref for the lifetime of the UP state */
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
733
/* Tear down a running HCI device: cancel outstanding work, flush all
 * queues, optionally reset the controller, close the transport, and
 * notify mgmt.  Idempotent: returns 0 immediately if the device was
 * already down. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	/* Abort any synchronous request in flight before taking the
	 * request lock ourselves */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Tell mgmt we powered off, unless auto-off already reported it */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	/* Drop the reference taken by hci_dev_open() */
	hci_dev_put(hdev);
	return 0;
}
823
824int hci_dev_close(__u16 dev)
825{
826 struct hci_dev *hdev;
827 int err;
828
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200829 hdev = hci_dev_get(dev);
830 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100832
833 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
834 cancel_delayed_work(&hdev->power_off);
835
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100837
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838 hci_dev_put(hdev);
839 return err;
840}
841
/* Ioctl-level reset of an HCI device: flush all pending traffic and
 * connection state and, unless the device is in raw mode, issue an HCI
 * Reset command to the controller.
 *
 * Returns 0 on success (including the no-op case when the device is not
 * up), -ENODEV for an unknown index, or the __hci_request() error.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other request issuers for this device */
	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Discard cached inquiry results and tear down all connections */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control accounting: one free command slot, no
	 * outstanding ACL/SCO/LE packets */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	/* Raw-mode devices are driven entirely from userspace; don't
	 * inject a reset behind their back */
	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
879
880int hci_dev_reset_stat(__u16 dev)
881{
882 struct hci_dev *hdev;
883 int ret = 0;
884
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200885 hdev = hci_dev_get(dev);
886 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700887 return -ENODEV;
888
889 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
890
891 hci_dev_put(hdev);
892
893 return ret;
894}
895
/* Dispatcher for the HCISET* ioctls.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, ...)
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Commands that need controller interaction go through hci_request();
 * the rest just update fields in the hci_dev.  Returns 0 or a negative
 * errno (-EFAULT on bad user pointer, -ENODEV on unknown index,
 * -EOPNOTSUPP / -EINVAL as noted below).
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are user-settable */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: low half = packet
		 * count, high half = MTU */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packing as HCISETACLMTU, for SCO links */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
970
/* HCIGETDEVLIST ioctl backend: copy up to dev_num (id, flags) pairs for
 * the registered HCI devices to userspace.
 *
 * @arg: userspace struct hci_dev_list_req; its dev_num field bounds how
 *       many entries the caller's buffer can hold.
 *
 * Returns 0, or -EFAULT / -EINVAL / -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the request so the kernel bounce buffer stays small */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing devices counts as user activity: abort any
		 * pending auto-power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) userspace expects pairable devices */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1017
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot for
 * one device and copy it to userspace.
 *
 * Returns 0, or -EFAULT / -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as user activity: cancel (and wait
	 * for) any pending auto-power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) userspace expects pairable devices */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack bus type in the low nibble, device type in the high one */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1059
1060/* ---- Interface to HCI drivers ---- */
1061
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001062static int hci_rfkill_set_block(void *data, bool blocked)
1063{
1064 struct hci_dev *hdev = data;
1065
1066 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1067
1068 if (!blocked)
1069 return 0;
1070
1071 hci_dev_do_close(hdev);
1072
1073 return 0;
1074}
1075
/* rfkill callbacks: only blocking is implemented (see
 * hci_rfkill_set_block above) */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1079
/* Deferred power-on handler (hdev->power_on work item).
 *
 * Opens the device; on success, arms the auto-power-off timer for
 * devices that were only brought up automatically, and tells the
 * management interface about a device that just finished setup.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-powered devices get switched back off after a timeout
	 * unless userspace claims them in the meantime */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1095
1096static void hci_power_off(struct work_struct *work)
1097{
Johan Hedberg32435532011-11-07 22:16:04 +02001098 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001099 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001100
1101 BT_DBG("%s", hdev->name);
1102
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001103 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001104}
1105
/* Discoverable-timeout handler (hdev->discov_off delayed work).
 *
 * Turns inquiry scan back off (leaving only page scan enabled) once the
 * discoverable period expires, and clears the recorded timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1123
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001124int hci_uuids_clear(struct hci_dev *hdev)
1125{
1126 struct list_head *p, *n;
1127
1128 list_for_each_safe(p, n, &hdev->uuids) {
1129 struct bt_uuid *uuid;
1130
1131 uuid = list_entry(p, struct bt_uuid, list);
1132
1133 list_del(p);
1134 kfree(uuid);
1135 }
1136
1137 return 0;
1138}
1139
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001140int hci_link_keys_clear(struct hci_dev *hdev)
1141{
1142 struct list_head *p, *n;
1143
1144 list_for_each_safe(p, n, &hdev->link_keys) {
1145 struct link_key *key;
1146
1147 key = list_entry(p, struct link_key, list);
1148
1149 list_del(p);
1150 kfree(key);
1151 }
1152
1153 return 0;
1154}
1155
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001156int hci_smp_ltks_clear(struct hci_dev *hdev)
1157{
1158 struct smp_ltk *k, *tmp;
1159
1160 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1161 list_del(&k->list);
1162 kfree(k);
1163 }
1164
1165 return 0;
1166}
1167
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001168struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1169{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001170 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001171
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001172 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001173 if (bacmp(bdaddr, &k->bdaddr) == 0)
1174 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001175
1176 return NULL;
1177}
1178
/* Decide whether a newly created link key should be stored persistently
 * or flushed when the connection ends.
 *
 * @conn may be NULL (security mode 3: key created without a connection).
 * @old_key_type is 0xff when there was no previous key.
 *
 * Returns true if the key should persist across connections.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1214
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001215struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001216{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001217 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001218
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001219 list_for_each_entry(k, &hdev->long_term_keys, list) {
1220 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001221 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001222 continue;
1223
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001224 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001225 }
1226
1227 return NULL;
1228}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001229
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001230struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001231 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001232{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001233 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001234
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235 list_for_each_entry(k, &hdev->long_term_keys, list)
1236 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001237 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001238 return k;
1239
1240 return NULL;
1241}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001242
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn:    the connection the key came from, or NULL
 * @new_key: nonzero if the controller reported this as a new key, in
 *           which case the management interface is notified
 * @val:     HCI_LINK_KEY_SIZE bytes of key material
 * @type:    link key type as reported by the controller
 *
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1295
/* Store (or update) an LE SMP key (STK or LTK) for @bdaddr/@addr_type.
 *
 * Keys whose @type is neither HCI_SMP_STK nor HCI_SMP_LTK are silently
 * ignored (returns 0).  If @new_key is set and the key is an LTK, the
 * management interface is notified.  Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys (not short term keys) are announced */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1332
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001333int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1334{
1335 struct link_key *key;
1336
1337 key = hci_find_link_key(hdev, bdaddr);
1338 if (!key)
1339 return -ENOENT;
1340
1341 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1342
1343 list_del(&key->list);
1344 kfree(key);
1345
1346 return 0;
1347}
1348
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001349int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1350{
1351 struct smp_ltk *k, *tmp;
1352
1353 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1354 if (bacmp(bdaddr, &k->bdaddr))
1355 continue;
1356
1357 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1358
1359 list_del(&k->list);
1360 kfree(k);
1361 }
1362
1363 return 0;
1364}
1365
/* HCI command timer function: fires when the controller fails to answer
 * a command within the timeout.  Logs the stuck opcode (if the sent
 * command is still around) and forces the command queue to make
 * progress again by restoring the single command credit and re-kicking
 * the cmd work.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1383
Szymon Janc2763eda2011-03-22 13:12:22 +01001384struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001385 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001386{
1387 struct oob_data *data;
1388
1389 list_for_each_entry(data, &hdev->remote_oob_data, list)
1390 if (bacmp(bdaddr, &data->bdaddr) == 0)
1391 return data;
1392
1393 return NULL;
1394}
1395
1396int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1397{
1398 struct oob_data *data;
1399
1400 data = hci_find_remote_oob_data(hdev, bdaddr);
1401 if (!data)
1402 return -ENOENT;
1403
1404 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1405
1406 list_del(&data->list);
1407 kfree(data);
1408
1409 return 0;
1410}
1411
1412int hci_remote_oob_data_clear(struct hci_dev *hdev)
1413{
1414 struct oob_data *data, *n;
1415
1416 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1417 list_del(&data->list);
1418 kfree(data);
1419 }
1420
1421 return 0;
1422}
1423
1424int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001425 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001426{
1427 struct oob_data *data;
1428
1429 data = hci_find_remote_oob_data(hdev, bdaddr);
1430
1431 if (!data) {
1432 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1433 if (!data)
1434 return -ENOMEM;
1435
1436 bacpy(&data->bdaddr, bdaddr);
1437 list_add(&data->list, &hdev->remote_oob_data);
1438 }
1439
1440 memcpy(data->hash, hash, sizeof(data->hash));
1441 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1442
1443 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1444
1445 return 0;
1446}
1447
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001448struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001449{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001450 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001451
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001452 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001453 if (bacmp(bdaddr, &b->bdaddr) == 0)
1454 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001455
1456 return NULL;
1457}
1458
1459int hci_blacklist_clear(struct hci_dev *hdev)
1460{
1461 struct list_head *p, *n;
1462
1463 list_for_each_safe(p, n, &hdev->blacklist) {
1464 struct bdaddr_list *b;
1465
1466 b = list_entry(p, struct bdaddr_list, list);
1467
1468 list_del(p);
1469 kfree(b);
1470 }
1471
1472 return 0;
1473}
1474
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001475int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001476{
1477 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001478
1479 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1480 return -EBADF;
1481
Antti Julku5e762442011-08-25 16:48:02 +03001482 if (hci_blacklist_lookup(hdev, bdaddr))
1483 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001484
1485 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001486 if (!entry)
1487 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001488
1489 bacpy(&entry->bdaddr, bdaddr);
1490
1491 list_add(&entry->list, &hdev->blacklist);
1492
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001493 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001494}
1495
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001496int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001497{
1498 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001499
Szymon Janc1ec918c2011-11-16 09:32:21 +01001500 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001501 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001502
1503 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001504 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001505 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001506
1507 list_del(&entry->list);
1508 kfree(entry);
1509
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001510 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001511}
1512
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001513static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1514{
1515 struct le_scan_params *param = (struct le_scan_params *) opt;
1516 struct hci_cp_le_set_scan_param cp;
1517
1518 memset(&cp, 0, sizeof(cp));
1519 cp.type = param->type;
1520 cp.interval = cpu_to_le16(param->interval);
1521 cp.window = cpu_to_le16(param->window);
1522
1523 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1524}
1525
1526static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1527{
1528 struct hci_cp_le_set_scan_enable cp;
1529
1530 memset(&cp, 0, sizeof(cp));
1531 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001532 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001533
1534 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1535}
1536
/* Synchronously start an LE scan: program the scan parameters, enable
 * scanning, and arm the delayed work that stops it after @timeout ms.
 *
 * Returns -EINPROGRESS if a scan is already running, a negative errno
 * if either HCI request fails, or 0 on success.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* Per-request timeout for each of the two HCI commands below */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* &param stays valid across the request because __hci_request
	 * completes synchronously under the req lock */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Schedule the automatic scan stop */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1570
/* Abort an ongoing LE scan before its timeout expires.
 *
 * Returns -EALREADY if no scan is active.  The disable command is only
 * sent if the pending le_scan_disable work could be cancelled — if
 * cancel fails the work is already running and will disable the scan
 * itself.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1588
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001589static void le_scan_disable_work(struct work_struct *work)
1590{
1591 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001592 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001593 struct hci_cp_le_set_scan_enable cp;
1594
1595 BT_DBG("%s", hdev->name);
1596
1597 memset(&cp, 0, sizeof(cp));
1598
1599 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1600}
1601
Andre Guedes28b75a82012-02-03 17:48:00 -03001602static void le_scan_work(struct work_struct *work)
1603{
1604 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1605 struct le_scan_params *param = &hdev->le_scan_params;
1606
1607 BT_DBG("%s", hdev->name);
1608
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001609 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1610 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001611}
1612
/* Request an LE scan with the given parameters.
 *
 * The scan itself is started asynchronously from le_scan_work on
 * system_long_wq (it can sleep waiting for HCI command completion).
 * Returns -EINPROGRESS if a previous scan request is still being
 * processed, so le_scan_params is never overwritten while in use.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1632
David Herrmann9be0dab2012-04-22 14:39:57 +02001633/* Alloc HCI device */
1634struct hci_dev *hci_alloc_dev(void)
1635{
1636 struct hci_dev *hdev;
1637
1638 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1639 if (!hdev)
1640 return NULL;
1641
David Herrmannb1b813d2012-04-22 14:39:58 +02001642 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1643 hdev->esco_type = (ESCO_HV1);
1644 hdev->link_mode = (HCI_LM_ACCEPT);
1645 hdev->io_capability = 0x03; /* No Input No Output */
1646
David Herrmannb1b813d2012-04-22 14:39:58 +02001647 hdev->sniff_max_interval = 800;
1648 hdev->sniff_min_interval = 80;
1649
1650 mutex_init(&hdev->lock);
1651 mutex_init(&hdev->req_lock);
1652
1653 INIT_LIST_HEAD(&hdev->mgmt_pending);
1654 INIT_LIST_HEAD(&hdev->blacklist);
1655 INIT_LIST_HEAD(&hdev->uuids);
1656 INIT_LIST_HEAD(&hdev->link_keys);
1657 INIT_LIST_HEAD(&hdev->long_term_keys);
1658 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03001659 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02001660
1661 INIT_WORK(&hdev->rx_work, hci_rx_work);
1662 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1663 INIT_WORK(&hdev->tx_work, hci_tx_work);
1664 INIT_WORK(&hdev->power_on, hci_power_on);
1665 INIT_WORK(&hdev->le_scan, le_scan_work);
1666
David Herrmannb1b813d2012-04-22 14:39:58 +02001667 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1668 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1669 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1670
David Herrmann9be0dab2012-04-22 14:39:57 +02001671 skb_queue_head_init(&hdev->driver_init);
David Herrmannb1b813d2012-04-22 14:39:58 +02001672 skb_queue_head_init(&hdev->rx_q);
1673 skb_queue_head_init(&hdev->cmd_q);
1674 skb_queue_head_init(&hdev->raw_q);
1675
1676 init_waitqueue_head(&hdev->req_wait_q);
1677
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001678 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02001679
David Herrmannb1b813d2012-04-22 14:39:58 +02001680 hci_init_sysfs(hdev);
1681 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02001682
1683 return hdev;
1684}
1685EXPORT_SYMBOL(hci_alloc_dev);
1686
/* Free HCI device.
 *
 * Drops the driver's reference; the hci_dev structure itself is freed
 * by the device core's release callback once the last reference goes
 * away, so this must not touch hdev afterwards. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1696
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697/* Register HCI device */
1698int hci_register_dev(struct hci_dev *hdev)
1699{
David Herrmannb1b813d2012-04-22 14:39:58 +02001700 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
David Herrmann010666a2012-01-07 15:47:07 +01001702 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 return -EINVAL;
1704
Mat Martineau08add512011-11-02 16:18:36 -07001705 /* Do not allow HCI_AMP devices to register at index 0,
1706 * so the index can be used as the AMP controller ID.
1707 */
Sasha Levin3df92b32012-05-27 22:36:56 +02001708 switch (hdev->dev_type) {
1709 case HCI_BREDR:
1710 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1711 break;
1712 case HCI_AMP:
1713 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1714 break;
1715 default:
1716 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001718
Sasha Levin3df92b32012-05-27 22:36:56 +02001719 if (id < 0)
1720 return id;
1721
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722 sprintf(hdev->name, "hci%d", id);
1723 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03001724
1725 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1726
Sasha Levin3df92b32012-05-27 22:36:56 +02001727 write_lock(&hci_dev_list_lock);
1728 list_add(&hdev->list, &hci_dev_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001729 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001731 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001732 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001733 if (!hdev->workqueue) {
1734 error = -ENOMEM;
1735 goto err;
1736 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001737
David Herrmann33ca9542011-10-08 14:58:49 +02001738 error = hci_add_sysfs(hdev);
1739 if (error < 0)
1740 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001742 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001743 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1744 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001745 if (hdev->rfkill) {
1746 if (rfkill_register(hdev->rfkill) < 0) {
1747 rfkill_destroy(hdev->rfkill);
1748 hdev->rfkill = NULL;
1749 }
1750 }
1751
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001752 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03001753
1754 if (hdev->dev_type != HCI_AMP)
1755 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1756
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001757 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001758
Linus Torvalds1da177e2005-04-16 15:20:36 -07001759 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01001760 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761
1762 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001763
David Herrmann33ca9542011-10-08 14:58:49 +02001764err_wqueue:
1765 destroy_workqueue(hdev->workqueue);
1766err:
Sasha Levin3df92b32012-05-27 22:36:56 +02001767 ida_simple_remove(&hci_index_ida, hdev->id);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001768 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001769 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001770 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001771
David Herrmann33ca9542011-10-08 14:58:49 +02001772 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773}
1774EXPORT_SYMBOL(hci_register_dev);
1775
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): marks the device as unregistering,
 * removes it from the global list, shuts it down, notifies mgmt,
 * tears down rfkill/sysfs/workqueue, clears all persistent per-device
 * lists and drops the registration reference. The index is returned
 * to the IDA only at the very end, after the last use of hdev here. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Stops further work (e.g. power_on) from acting on this device. */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Tell mgmt about the removal unless the device never finished
	 * its initial setup (mgmt was then never told it exists). */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1831
/* Suspend HCI device.
 *
 * Only broadcasts HCI_DEV_SUSPEND to registered notifiers; any actual
 * suspend handling is up to them. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1839
/* Resume HCI device.
 *
 * Counterpart of hci_suspend_dev(): broadcasts HCI_DEV_RESUME to
 * registered notifiers. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1847
Marcel Holtmann76bca882009-11-18 00:40:39 +01001848/* Receive frame from HCI drivers */
1849int hci_recv_frame(struct sk_buff *skb)
1850{
1851 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1852 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001853 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001854 kfree_skb(skb);
1855 return -ENXIO;
1856 }
1857
1858 /* Incomming skb */
1859 bt_cb(skb)->incoming = 1;
1860
1861 /* Time stamp */
1862 __net_timestamp(skb);
1863
Marcel Holtmann76bca882009-11-18 00:40:39 +01001864 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001865 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001866
Marcel Holtmann76bca882009-11-18 00:40:39 +01001867 return 0;
1868}
1869EXPORT_SYMBOL(hci_recv_frame);
1870
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301871static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001872 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301873{
1874 int len = 0;
1875 int hlen = 0;
1876 int remain = count;
1877 struct sk_buff *skb;
1878 struct bt_skb_cb *scb;
1879
1880 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001881 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301882 return -EILSEQ;
1883
1884 skb = hdev->reassembly[index];
1885
1886 if (!skb) {
1887 switch (type) {
1888 case HCI_ACLDATA_PKT:
1889 len = HCI_MAX_FRAME_SIZE;
1890 hlen = HCI_ACL_HDR_SIZE;
1891 break;
1892 case HCI_EVENT_PKT:
1893 len = HCI_MAX_EVENT_SIZE;
1894 hlen = HCI_EVENT_HDR_SIZE;
1895 break;
1896 case HCI_SCODATA_PKT:
1897 len = HCI_MAX_SCO_SIZE;
1898 hlen = HCI_SCO_HDR_SIZE;
1899 break;
1900 }
1901
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001902 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301903 if (!skb)
1904 return -ENOMEM;
1905
1906 scb = (void *) skb->cb;
1907 scb->expect = hlen;
1908 scb->pkt_type = type;
1909
1910 skb->dev = (void *) hdev;
1911 hdev->reassembly[index] = skb;
1912 }
1913
1914 while (count) {
1915 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03001916 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301917
1918 memcpy(skb_put(skb, len), data, len);
1919
1920 count -= len;
1921 data += len;
1922 scb->expect -= len;
1923 remain = count;
1924
1925 switch (type) {
1926 case HCI_EVENT_PKT:
1927 if (skb->len == HCI_EVENT_HDR_SIZE) {
1928 struct hci_event_hdr *h = hci_event_hdr(skb);
1929 scb->expect = h->plen;
1930
1931 if (skb_tailroom(skb) < scb->expect) {
1932 kfree_skb(skb);
1933 hdev->reassembly[index] = NULL;
1934 return -ENOMEM;
1935 }
1936 }
1937 break;
1938
1939 case HCI_ACLDATA_PKT:
1940 if (skb->len == HCI_ACL_HDR_SIZE) {
1941 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1942 scb->expect = __le16_to_cpu(h->dlen);
1943
1944 if (skb_tailroom(skb) < scb->expect) {
1945 kfree_skb(skb);
1946 hdev->reassembly[index] = NULL;
1947 return -ENOMEM;
1948 }
1949 }
1950 break;
1951
1952 case HCI_SCODATA_PKT:
1953 if (skb->len == HCI_SCO_HDR_SIZE) {
1954 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1955 scb->expect = h->dlen;
1956
1957 if (skb_tailroom(skb) < scb->expect) {
1958 kfree_skb(skb);
1959 hdev->reassembly[index] = NULL;
1960 return -ENOMEM;
1961 }
1962 }
1963 break;
1964 }
1965
1966 if (scb->expect == 0) {
1967 /* Complete frame */
1968
1969 bt_cb(skb)->pkt_type = type;
1970 hci_recv_frame(skb);
1971
1972 hdev->reassembly[index] = NULL;
1973 return remain;
1974 }
1975 }
1976
1977 return remain;
1978}
1979
Marcel Holtmannef222012007-07-11 06:42:04 +02001980int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1981{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301982 int rem = 0;
1983
Marcel Holtmannef222012007-07-11 06:42:04 +02001984 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1985 return -EILSEQ;
1986
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001987 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001988 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301989 if (rem < 0)
1990 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001991
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301992 data += (count - rem);
1993 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001994 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001995
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301996 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001997}
1998EXPORT_SYMBOL(hci_recv_fragment);
1999
Suraj Sumangala99811512010-07-14 13:02:19 +05302000#define STREAM_REASSEMBLY 0
2001
2002int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2003{
2004 int type;
2005 int rem = 0;
2006
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002007 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302008 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2009
2010 if (!skb) {
2011 struct { char type; } *pkt;
2012
2013 /* Start of the frame */
2014 pkt = data;
2015 type = pkt->type;
2016
2017 data++;
2018 count--;
2019 } else
2020 type = bt_cb(skb)->pkt_type;
2021
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002022 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002023 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302024 if (rem < 0)
2025 return rem;
2026
2027 data += (count - rem);
2028 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002029 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302030
2031 return rem;
2032}
2033EXPORT_SYMBOL(hci_recv_stream_fragment);
2034
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035/* ---- Interface to upper protocols ---- */
2036
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037int hci_register_cb(struct hci_cb *cb)
2038{
2039 BT_DBG("%p name %s", cb, cb->name);
2040
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002041 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002043 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044
2045 return 0;
2046}
2047EXPORT_SYMBOL(hci_register_cb);
2048
/* Remove a previously registered upper-protocol callback structure from
 * the global hci_cb_list. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2060
/* Hand one outgoing HCI packet to the transport driver.
 *
 * Timestamps the skb, mirrors a copy to the HCI monitor and — if anyone
 * is listening in promiscuous mode — to raw HCI sockets, then passes the
 * skb to hdev->send(). Ownership of the skb moves to the driver; returns
 * the driver's result, or -ENODEV if skb->dev was not set. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2088
/* Send HCI command.
 *
 * Builds a command packet (header + @plen bytes of @param), queues it on
 * the device's command queue and kicks the command work, which drains
 * the queue subject to the controller's command flow control.
 * Returns 0 on queueing success or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During controller init, remember the last issued command so the
	 * init sequence can match completion events against it. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124
2125/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002126void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127{
2128 struct hci_command_hdr *hdr;
2129
2130 if (!hdev->sent_cmd)
2131 return NULL;
2132
2133 hdr = (void *) hdev->sent_cmd->data;
2134
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002135 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002136 return NULL;
2137
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002138 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002139
2140 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2141}
2142
/* Send ACL data */

/* Prepend an ACL data header to @skb: connection handle plus packet
 * boundary/broadcast flags packed into one 16-bit field, followed by
 * the (pre-push) data length, both little-endian on the wire. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2155
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002156static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002157 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002158{
2159 struct hci_dev *hdev = conn->hdev;
2160 struct sk_buff *list;
2161
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002162 skb->len = skb_headlen(skb);
2163 skb->data_len = 0;
2164
2165 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2166 hci_add_acl_hdr(skb, conn->handle, flags);
2167
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002168 list = skb_shinfo(skb)->frag_list;
2169 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002170 /* Non fragmented */
2171 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2172
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002173 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002174 } else {
2175 /* Fragmented */
2176 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2177
2178 skb_shinfo(skb)->frag_list = NULL;
2179
2180 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002181 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002183 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002184
2185 flags &= ~ACL_START;
2186 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187 do {
2188 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002189
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002191 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002192 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002193
2194 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2195
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002196 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 } while (list);
2198
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002199 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002200 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002201}
2202
/* Send ACL data on a channel: queue the packet on the channel's data
 * queue and schedule the TX work, which applies per-connection flow
 * control before handing packets to the driver. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002216
/* Send SCO data.
 *
 * Prepends the SCO header (connection handle + length) and queues the
 * packet on the connection's data queue for the TX work to transmit. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002238
2239/* ---- HCI TX task (outgoing data) ---- */
2240
2241/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002242static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2243 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244{
2245 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002246 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002247 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002249 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002251
2252 rcu_read_lock();
2253
2254 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002255 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002256 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002257
2258 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2259 continue;
2260
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 num++;
2262
2263 if (c->sent < min) {
2264 min = c->sent;
2265 conn = c;
2266 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002267
2268 if (hci_conn_num(hdev, type) == num)
2269 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002270 }
2271
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002272 rcu_read_unlock();
2273
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002275 int cnt, q;
2276
2277 switch (conn->type) {
2278 case ACL_LINK:
2279 cnt = hdev->acl_cnt;
2280 break;
2281 case SCO_LINK:
2282 case ESCO_LINK:
2283 cnt = hdev->sco_cnt;
2284 break;
2285 case LE_LINK:
2286 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2287 break;
2288 default:
2289 cnt = 0;
2290 BT_ERR("Unknown link type");
2291 }
2292
2293 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002294 *quote = q ? q : 1;
2295 } else
2296 *quote = 0;
2297
2298 BT_DBG("conn %p quote %d", conn, *quote);
2299 return conn;
2300}
2301
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002302static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303{
2304 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002305 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306
Ville Tervobae1f5d92011-02-10 22:38:53 -03002307 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002309 rcu_read_lock();
2310
Linus Torvalds1da177e2005-04-16 15:20:36 -07002311 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002312 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002313 if (c->type == type && c->sent) {
2314 BT_ERR("%s killing stalled connection %s",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002315 hdev->name, batostr(&c->dst));
Andrei Emeltchenko7490c6c2012-06-01 16:18:25 +03002316 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 }
2318 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002319
2320 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321}
2322
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002323static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2324 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002325{
2326 struct hci_conn_hash *h = &hdev->conn_hash;
2327 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02002328 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002329 struct hci_conn *conn;
2330 int cnt, q, conn_num = 0;
2331
2332 BT_DBG("%s", hdev->name);
2333
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002334 rcu_read_lock();
2335
2336 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002337 struct hci_chan *tmp;
2338
2339 if (conn->type != type)
2340 continue;
2341
2342 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2343 continue;
2344
2345 conn_num++;
2346
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002347 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002348 struct sk_buff *skb;
2349
2350 if (skb_queue_empty(&tmp->data_q))
2351 continue;
2352
2353 skb = skb_peek(&tmp->data_q);
2354 if (skb->priority < cur_prio)
2355 continue;
2356
2357 if (skb->priority > cur_prio) {
2358 num = 0;
2359 min = ~0;
2360 cur_prio = skb->priority;
2361 }
2362
2363 num++;
2364
2365 if (conn->sent < min) {
2366 min = conn->sent;
2367 chan = tmp;
2368 }
2369 }
2370
2371 if (hci_conn_num(hdev, type) == conn_num)
2372 break;
2373 }
2374
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002375 rcu_read_unlock();
2376
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002377 if (!chan)
2378 return NULL;
2379
2380 switch (chan->conn->type) {
2381 case ACL_LINK:
2382 cnt = hdev->acl_cnt;
2383 break;
2384 case SCO_LINK:
2385 case ESCO_LINK:
2386 cnt = hdev->sco_cnt;
2387 break;
2388 case LE_LINK:
2389 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2390 break;
2391 default:
2392 cnt = 0;
2393 BT_ERR("Unknown link type");
2394 }
2395
2396 q = cnt / num;
2397 *quote = q ? q : 1;
2398 BT_DBG("chan %p quote %d", chan, *quote);
2399 return chan;
2400}
2401
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002402static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2403{
2404 struct hci_conn_hash *h = &hdev->conn_hash;
2405 struct hci_conn *conn;
2406 int num = 0;
2407
2408 BT_DBG("%s", hdev->name);
2409
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002410 rcu_read_lock();
2411
2412 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002413 struct hci_chan *chan;
2414
2415 if (conn->type != type)
2416 continue;
2417
2418 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2419 continue;
2420
2421 num++;
2422
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002423 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002424 struct sk_buff *skb;
2425
2426 if (chan->sent) {
2427 chan->sent = 0;
2428 continue;
2429 }
2430
2431 if (skb_queue_empty(&chan->data_q))
2432 continue;
2433
2434 skb = skb_peek(&chan->data_q);
2435 if (skb->priority >= HCI_PRIO_MAX - 1)
2436 continue;
2437
2438 skb->priority = HCI_PRIO_MAX - 1;
2439
2440 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002441 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002442 }
2443
2444 if (hci_conn_num(hdev, type) == num)
2445 break;
2446 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002447
2448 rcu_read_unlock();
2449
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002450}
2451
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002452static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2453{
2454 /* Calculate count of blocks used by this packet */
2455 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2456}
2457
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002458static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 if (!test_bit(HCI_RAW, &hdev->flags)) {
2461 /* ACL tx timeout must be longer than maximum
2462 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002463 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002464 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002465 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002466 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002467}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468
/* ACL scheduler for packet-based flow control: one buffer credit
 * (acl_cnt) is consumed per packet sent.  Drains the best channel
 * returned by hci_chan_sent() up to its fair quota, then re-evaluates,
 * until credits or data run out.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	/* Snapshot of the credits so we can tell later whether anything
	 * was actually sent this round. */
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Kick tx-timeout recovery if we have been starved of credits */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Only send packets of the priority seen at the head of
		 * the queue; lower-priority ones wait for the next pick. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One credit per packet; track per-channel and
			 * per-connection counts for fairness/recalc. */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: let starved channels catch up */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2506
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002507static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002508{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002509 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002510 struct hci_chan *chan;
2511 struct sk_buff *skb;
2512 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002513
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002514 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002515
2516 while (hdev->block_cnt > 0 &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002517 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002518 u32 priority = (skb_peek(&chan->data_q))->priority;
2519 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2520 int blocks;
2521
2522 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002523 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002524
2525 /* Stop if priority has changed */
2526 if (skb->priority < priority)
2527 break;
2528
2529 skb = skb_dequeue(&chan->data_q);
2530
2531 blocks = __get_blocks(hdev, skb);
2532 if (blocks > hdev->block_cnt)
2533 return;
2534
2535 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002536 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002537
2538 hci_send_frame(skb);
2539 hdev->acl_last_tx = jiffies;
2540
2541 hdev->block_cnt -= blocks;
2542 quote -= blocks;
2543
2544 chan->sent += blocks;
2545 chan->conn->sent += blocks;
2546 }
2547 }
2548
2549 if (cnt != hdev->block_cnt)
2550 hci_prio_recalculate(hdev, ACL_LINK);
2551}
2552
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002553static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002554{
2555 BT_DBG("%s", hdev->name);
2556
2557 if (!hci_conn_num(hdev, ACL_LINK))
2558 return;
2559
2560 switch (hdev->flow_ctl_mode) {
2561 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2562 hci_sched_acl_pkt(hdev);
2563 break;
2564
2565 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2566 hci_sched_acl_blk(hdev);
2567 break;
2568 }
2569}
2570
Linus Torvalds1da177e2005-04-16 15:20:36 -07002571/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002572static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573{
2574 struct hci_conn *conn;
2575 struct sk_buff *skb;
2576 int quote;
2577
2578 BT_DBG("%s", hdev->name);
2579
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002580 if (!hci_conn_num(hdev, SCO_LINK))
2581 return;
2582
Linus Torvalds1da177e2005-04-16 15:20:36 -07002583 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2584 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2585 BT_DBG("skb %p len %d", skb, skb->len);
2586 hci_send_frame(skb);
2587
2588 conn->sent++;
2589 if (conn->sent == ~0)
2590 conn->sent = 0;
2591 }
2592 }
2593}
2594
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002595static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002596{
2597 struct hci_conn *conn;
2598 struct sk_buff *skb;
2599 int quote;
2600
2601 BT_DBG("%s", hdev->name);
2602
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002603 if (!hci_conn_num(hdev, ESCO_LINK))
2604 return;
2605
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002606 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2607 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002608 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2609 BT_DBG("skb %p len %d", skb, skb->len);
2610 hci_send_frame(skb);
2611
2612 conn->sent++;
2613 if (conn->sent == ~0)
2614 conn->sent = 0;
2615 }
2616 }
2617}
2618
/* LE scheduler.  Controllers with dedicated LE buffers (le_pkts != 0)
 * draw credits from le_cnt; otherwise LE traffic shares the ACL
 * credits (acl_cnt).  The updated count is written back to whichever
 * pool was used.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the credit pool: dedicated LE buffers or shared ACL ones */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	/* Remember the starting count to detect whether anything was sent */
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Only send packets of the priority at the head of the
		 * queue; lower-priority ones wait for the next pick. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: let starved channels catch up */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2669
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002670static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002672 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673 struct sk_buff *skb;
2674
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002675 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002676 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002677
2678 /* Schedule queues and send stuff to HCI driver */
2679
2680 hci_sched_acl(hdev);
2681
2682 hci_sched_sco(hdev);
2683
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002684 hci_sched_esco(hdev);
2685
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002686 hci_sched_le(hdev);
2687
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688 /* Send next queued raw (unknown type) packet */
2689 while ((skb = skb_dequeue(&hdev->raw_q)))
2690 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691}
2692
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002693/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694
2695/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002696static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697{
2698 struct hci_acl_hdr *hdr = (void *) skb->data;
2699 struct hci_conn *conn;
2700 __u16 handle, flags;
2701
2702 skb_pull(skb, HCI_ACL_HDR_SIZE);
2703
2704 handle = __le16_to_cpu(hdr->handle);
2705 flags = hci_flags(handle);
2706 handle = hci_handle(handle);
2707
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002708 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002709 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710
2711 hdev->stat.acl_rx++;
2712
2713 hci_dev_lock(hdev);
2714 conn = hci_conn_hash_lookup_handle(hdev, handle);
2715 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002716
Linus Torvalds1da177e2005-04-16 15:20:36 -07002717 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002718 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002719
Johan Hedberg671267b2012-05-12 16:11:50 -03002720 hci_dev_lock(hdev);
2721 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2722 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2723 mgmt_device_connected(hdev, &conn->dst, conn->type,
2724 conn->dst_type, 0, NULL, 0,
2725 conn->dev_class);
2726 hci_dev_unlock(hdev);
2727
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002729 l2cap_recv_acldata(conn, skb, flags);
2730 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002732 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002733 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734 }
2735
2736 kfree_skb(skb);
2737}
2738
2739/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002740static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741{
2742 struct hci_sco_hdr *hdr = (void *) skb->data;
2743 struct hci_conn *conn;
2744 __u16 handle;
2745
2746 skb_pull(skb, HCI_SCO_HDR_SIZE);
2747
2748 handle = __le16_to_cpu(hdr->handle);
2749
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002750 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002751
2752 hdev->stat.sco_rx++;
2753
2754 hci_dev_lock(hdev);
2755 conn = hci_conn_hash_lookup_handle(hdev, handle);
2756 hci_dev_unlock(hdev);
2757
2758 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002760 sco_recv_scodata(conn, skb);
2761 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002763 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002764 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765 }
2766
2767 kfree_skb(skb);
2768}
2769
/* RX worker: drain the receive queue, copy packets to the monitor and
 * (in promiscuous mode) the sockets, then dispatch by packet type. */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode the stack does not process packets itself */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events still fall through, as init needs them. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
2824
/* CMD worker: send the next queued HCI command when the controller's
 * command credit (cmd_cnt) permits one in flight. */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the clone of the previous command */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone of the command being sent — presumably so
		 * the event path can inspect the pending command; confirm
		 * against the completion handlers. */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout is armed while resetting */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: put the command back and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002856
2857int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2858{
2859 /* General inquiry access code (GIAC) */
2860 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2861 struct hci_cp_inquiry cp;
2862
2863 BT_DBG("%s", hdev->name);
2864
2865 if (test_bit(HCI_INQUIRY, &hdev->flags))
2866 return -EINPROGRESS;
2867
Johan Hedberg46632622012-01-02 16:06:08 +02002868 inquiry_cache_flush(hdev);
2869
Andre Guedes2519a1f2011-11-07 11:45:24 -03002870 memset(&cp, 0, sizeof(cp));
2871 memcpy(&cp.lap, lap, sizeof(cp.lap));
2872 cp.length = length;
2873
2874 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2875}
Andre Guedes023d50492011-11-04 14:16:52 -03002876
2877int hci_cancel_inquiry(struct hci_dev *hdev)
2878{
2879 BT_DBG("%s", hdev->name);
2880
2881 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002882 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002883
2884 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2885}
Andre Guedes31f79562012-04-24 21:02:53 -03002886
2887u8 bdaddr_to_le(u8 bdaddr_type)
2888{
2889 switch (bdaddr_type) {
2890 case BDADDR_LE_PUBLIC:
2891 return ADDR_LE_DEV_PUBLIC;
2892
2893 default:
2894 /* Fallback to LE Random address type */
2895 return ADDR_LE_DEV_RANDOM;
2896 }
2897}