blob: 2e72c410fb47f02f0595c7bcbcb7501dac76f258 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
Sasha Levin3df92b32012-05-27 22:36:56 +020048/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Forward a device state event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
Johan Hedberg23bb5762010-12-21 23:01:27 +020060void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070061{
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +030062 BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);
Johan Hedberg23bb5762010-12-21 23:01:27 +020063
Johan Hedberga5040ef2011-01-10 13:28:59 +020064 /* If this is the init phase check if the completed command matches
65 * the last init command, and if not just return.
66 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020067 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
68 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
Andrei Emeltchenko1036b892012-03-12 15:59:33 +020069 u16 opcode = __le16_to_cpu(sent->opcode);
Johan Hedberg75fb0e32012-03-01 21:35:55 +020070 struct sk_buff *skb;
71
72 /* Some CSR based controllers generate a spontaneous
73 * reset complete event during init and any pending
74 * command will never be completed. In such a case we
75 * need to resend whatever was the last sent
76 * command.
77 */
78
Andrei Emeltchenko1036b892012-03-12 15:59:33 +020079 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
Johan Hedberg75fb0e32012-03-01 21:35:55 +020080 return;
81
82 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
83 if (skb) {
84 skb_queue_head(&hdev->cmd_q, skb);
85 queue_work(hdev->workqueue, &hdev->cmd_work);
86 }
87
Johan Hedberg23bb5762010-12-21 23:01:27 +020088 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +020089 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070090
91 if (hdev->req_status == HCI_REQ_PEND) {
92 hdev->req_result = result;
93 hdev->req_status = HCI_REQ_DONE;
94 wake_up_interruptible(&hdev->req_wait_q);
95 }
96}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300110static int __hci_request(struct hci_dev *hdev,
111 void (*req)(struct hci_dev *hdev, unsigned long opt),
112 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700113{
114 DECLARE_WAITQUEUE(wait, current);
115 int err = 0;
116
117 BT_DBG("%s start", hdev->name);
118
119 hdev->req_status = HCI_REQ_PEND;
120
121 add_wait_queue(&hdev->req_wait_q, &wait);
122 set_current_state(TASK_INTERRUPTIBLE);
123
124 req(hdev, opt);
125 schedule_timeout(timeout);
126
127 remove_wait_queue(&hdev->req_wait_q, &wait);
128
129 if (signal_pending(current))
130 return -EINTR;
131
132 switch (hdev->req_status) {
133 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700134 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700135 break;
136
137 case HCI_REQ_CANCELED:
138 err = -hdev->req_result;
139 break;
140
141 default:
142 err = -ETIMEDOUT;
143 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700144 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700145
Johan Hedberga5040ef2011-01-10 13:28:59 +0200146 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
148 BT_DBG("%s end: err %d", hdev->name, err);
149
150 return err;
151}
152
Gustavo Padovan6039aa732012-05-23 04:04:18 -0300153static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156{
157 int ret;
158
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
170static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171{
172 BT_DBG("%s %ld", hdev->name, opt);
173
174 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300175 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177}
178
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200179static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200181 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800182 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200183 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700184
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200185 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
186
Linus Torvalds1da177e2005-04-16 15:20:36 -0700187 /* Mandatory initialization */
188
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200190 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700191
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200192 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200193 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200194
Linus Torvalds1da177e2005-04-16 15:20:36 -0700195 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200196 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700197
Linus Torvalds1da177e2005-04-16 15:20:36 -0700198 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200199 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
200
201 /* Read Class of Device */
202 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
203
204 /* Read Local Name */
205 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206
207 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200208 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700209
210 /* Optional initialization */
211
212 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200213 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200214 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700215
Linus Torvalds1da177e2005-04-16 15:20:36 -0700216 /* Connection accept timeout ~20 secs */
Andrei Emeltchenko82781e62012-05-25 11:38:27 +0300217 param = __constant_cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200218 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200219
220 bacpy(&cp.bdaddr, BDADDR_ANY);
221 cp.delete_all = 1;
222 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223}
224
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200225static void amp_init(struct hci_dev *hdev)
226{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200227 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
228
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200229 /* Read Local Version */
230 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300231
232 /* Read Local AMP Info */
233 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300234
235 /* Read Data Blk size */
236 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200237}
238
239static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
240{
241 struct sk_buff *skb;
242
243 BT_DBG("%s %ld", hdev->name, opt);
244
245 /* Driver initialization */
246
247 /* Special commands */
248 while ((skb = skb_dequeue(&hdev->driver_init))) {
249 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
250 skb->dev = (void *) hdev;
251
252 skb_queue_tail(&hdev->cmd_q, skb);
253 queue_work(hdev->workqueue, &hdev->cmd_work);
254 }
255 skb_queue_purge(&hdev->driver_init);
256
Andrei Emeltchenko11778712012-06-11 11:13:10 +0300257 /* Reset */
258 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
259 hci_reset_req(hdev, 0);
260
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200261 switch (hdev->dev_type) {
262 case HCI_BREDR:
263 bredr_init(hdev);
264 break;
265
266 case HCI_AMP:
267 amp_init(hdev);
268 break;
269
270 default:
271 BT_ERR("Unknown device type %d", hdev->dev_type);
272 break;
273 }
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200274}
275
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300276static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
277{
278 BT_DBG("%s", hdev->name);
279
280 /* Read LE buffer size */
281 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
282}
283
Linus Torvalds1da177e2005-04-16 15:20:36 -0700284static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
285{
286 __u8 scan = opt;
287
288 BT_DBG("%s %x", hdev->name, scan);
289
290 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200291 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700292}
293
294static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
295{
296 __u8 auth = opt;
297
298 BT_DBG("%s %x", hdev->name, auth);
299
300 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200301 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302}
303
304static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
305{
306 __u8 encrypt = opt;
307
308 BT_DBG("%s %x", hdev->name, encrypt);
309
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200310 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200311 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700312}
313
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200314static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
315{
316 __le16 policy = cpu_to_le16(opt);
317
Marcel Holtmanna418b892008-11-30 12:17:28 +0100318 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200319
320 /* Default link policy */
321 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
322}
323
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900324/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700325 * Device is held on return. */
326struct hci_dev *hci_dev_get(int index)
327{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200328 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700329
330 BT_DBG("%d", index);
331
332 if (index < 0)
333 return NULL;
334
335 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200336 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700337 if (d->id == index) {
338 hdev = hci_dev_hold(d);
339 break;
340 }
341 }
342 read_unlock(&hci_dev_list_lock);
343 return hdev;
344}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345
346/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200347
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200348bool hci_discovery_active(struct hci_dev *hdev)
349{
350 struct discovery_state *discov = &hdev->discovery;
351
Andre Guedes6fbe1952012-02-03 17:47:58 -0300352 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300353 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300354 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200355 return true;
356
Andre Guedes6fbe1952012-02-03 17:47:58 -0300357 default:
358 return false;
359 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200360}
361
Johan Hedbergff9ef572012-01-04 14:23:45 +0200362void hci_discovery_set_state(struct hci_dev *hdev, int state)
363{
364 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
365
366 if (hdev->discovery.state == state)
367 return;
368
369 switch (state) {
370 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300371 if (hdev->discovery.state != DISCOVERY_STARTING)
372 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200373 break;
374 case DISCOVERY_STARTING:
375 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300376 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200377 mgmt_discovering(hdev, 1);
378 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200379 case DISCOVERY_RESOLVING:
380 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200381 case DISCOVERY_STOPPING:
382 break;
383 }
384
385 hdev->discovery.state = state;
386}
387
Linus Torvalds1da177e2005-04-16 15:20:36 -0700388static void inquiry_cache_flush(struct hci_dev *hdev)
389{
Johan Hedberg30883512012-01-04 14:16:21 +0200390 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200391 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700392
Johan Hedberg561aafb2012-01-04 13:31:59 +0200393 list_for_each_entry_safe(p, n, &cache->all, all) {
394 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200395 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700396 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200397
398 INIT_LIST_HEAD(&cache->unknown);
399 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700400}
401
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300402struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
403 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404{
Johan Hedberg30883512012-01-04 14:16:21 +0200405 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700406 struct inquiry_entry *e;
407
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300408 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409
Johan Hedberg561aafb2012-01-04 13:31:59 +0200410 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200412 return e;
413 }
414
415 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416}
417
Johan Hedberg561aafb2012-01-04 13:31:59 +0200418struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300419 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200420{
Johan Hedberg30883512012-01-04 14:16:21 +0200421 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200422 struct inquiry_entry *e;
423
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300424 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200425
426 list_for_each_entry(e, &cache->unknown, list) {
427 if (!bacmp(&e->data.bdaddr, bdaddr))
428 return e;
429 }
430
431 return NULL;
432}
433
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200434struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300435 bdaddr_t *bdaddr,
436 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200437{
438 struct discovery_state *cache = &hdev->discovery;
439 struct inquiry_entry *e;
440
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300441 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200442
443 list_for_each_entry(e, &cache->resolve, list) {
444 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
445 return e;
446 if (!bacmp(&e->data.bdaddr, bdaddr))
447 return e;
448 }
449
450 return NULL;
451}
452
Johan Hedberga3d4e202012-01-09 00:53:02 +0200453void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300454 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200455{
456 struct discovery_state *cache = &hdev->discovery;
457 struct list_head *pos = &cache->resolve;
458 struct inquiry_entry *p;
459
460 list_del(&ie->list);
461
462 list_for_each_entry(p, &cache->resolve, list) {
463 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300464 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200465 break;
466 pos = &p->list;
467 }
468
469 list_add(&ie->list, pos);
470}
471
Johan Hedberg31754052012-01-04 13:39:52 +0200472bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300473 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700474{
Johan Hedberg30883512012-01-04 14:16:21 +0200475 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200476 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700477
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300478 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200480 if (ssp)
481 *ssp = data->ssp_mode;
482
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200483 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200484 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200485 if (ie->data.ssp_mode && ssp)
486 *ssp = true;
487
Johan Hedberga3d4e202012-01-09 00:53:02 +0200488 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300489 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +0200490 ie->data.rssi = data->rssi;
491 hci_inquiry_cache_update_resolve(hdev, ie);
492 }
493
Johan Hedberg561aafb2012-01-04 13:31:59 +0200494 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200495 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200496
Johan Hedberg561aafb2012-01-04 13:31:59 +0200497 /* Entry not in the cache. Add new one. */
498 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
499 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200500 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200501
502 list_add(&ie->all, &cache->all);
503
504 if (name_known) {
505 ie->name_state = NAME_KNOWN;
506 } else {
507 ie->name_state = NAME_NOT_KNOWN;
508 list_add(&ie->list, &cache->unknown);
509 }
510
511update:
512 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300513 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +0200514 ie->name_state = NAME_KNOWN;
515 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700516 }
517
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200518 memcpy(&ie->data, data, sizeof(*data));
519 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700520 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200521
522 if (ie->name_state == NAME_NOT_KNOWN)
523 return false;
524
525 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700526}
527
528static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
529{
Johan Hedberg30883512012-01-04 14:16:21 +0200530 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531 struct inquiry_info *info = (struct inquiry_info *) buf;
532 struct inquiry_entry *e;
533 int copied = 0;
534
Johan Hedberg561aafb2012-01-04 13:31:59 +0200535 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700536 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200537
538 if (copied >= num)
539 break;
540
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541 bacpy(&info->bdaddr, &data->bdaddr);
542 info->pscan_rep_mode = data->pscan_rep_mode;
543 info->pscan_period_mode = data->pscan_period_mode;
544 info->pscan_mode = data->pscan_mode;
545 memcpy(info->dev_class, data->dev_class, 3);
546 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200547
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200549 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700550 }
551
552 BT_DBG("cache %p, copied %d", cache, copied);
553 return copied;
554}
555
556static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
557{
558 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
559 struct hci_cp_inquiry cp;
560
561 BT_DBG("%s", hdev->name);
562
563 if (test_bit(HCI_INQUIRY, &hdev->flags))
564 return;
565
566 /* Start Inquiry */
567 memcpy(&cp.lap, &ir->lap, 3);
568 cp.length = ir->length;
569 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200570 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700571}
572
573int hci_inquiry(void __user *arg)
574{
575 __u8 __user *ptr = arg;
576 struct hci_inquiry_req ir;
577 struct hci_dev *hdev;
578 int err = 0, do_inquiry = 0, max_rsp;
579 long timeo;
580 __u8 *buf;
581
582 if (copy_from_user(&ir, ptr, sizeof(ir)))
583 return -EFAULT;
584
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200585 hdev = hci_dev_get(ir.dev_id);
586 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587 return -ENODEV;
588
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300589 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900590 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300591 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 inquiry_cache_flush(hdev);
593 do_inquiry = 1;
594 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300595 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700596
Marcel Holtmann04837f62006-07-03 10:02:33 +0200597 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200598
599 if (do_inquiry) {
600 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
601 if (err < 0)
602 goto done;
603 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700604
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300605 /* for unlimited number of responses we will use buffer with
606 * 255 entries
607 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700608 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
609
610 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
611 * copy it to the user space.
612 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100613 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200614 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700615 err = -ENOMEM;
616 goto done;
617 }
618
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300619 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300621 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700622
623 BT_DBG("num_rsp %d", ir.num_rsp);
624
625 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
626 ptr += sizeof(ir);
627 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300628 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700629 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900630 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631 err = -EFAULT;
632
633 kfree(buf);
634
635done:
636 hci_dev_put(hdev);
637 return err;
638}
639
640/* ---- HCI ioctl helpers ---- */
641
642int hci_dev_open(__u16 dev)
643{
644 struct hci_dev *hdev;
645 int ret = 0;
646
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200647 hdev = hci_dev_get(dev);
648 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649 return -ENODEV;
650
651 BT_DBG("%s %p", hdev->name, hdev);
652
653 hci_req_lock(hdev);
654
Johan Hovold94324962012-03-15 14:48:41 +0100655 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
656 ret = -ENODEV;
657 goto done;
658 }
659
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200660 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
661 ret = -ERFKILL;
662 goto done;
663 }
664
Linus Torvalds1da177e2005-04-16 15:20:36 -0700665 if (test_bit(HCI_UP, &hdev->flags)) {
666 ret = -EALREADY;
667 goto done;
668 }
669
670 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
671 set_bit(HCI_RAW, &hdev->flags);
672
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200673 /* Treat all non BR/EDR controllers as raw devices if
674 enable_hs is not set */
675 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100676 set_bit(HCI_RAW, &hdev->flags);
677
Linus Torvalds1da177e2005-04-16 15:20:36 -0700678 if (hdev->open(hdev)) {
679 ret = -EIO;
680 goto done;
681 }
682
683 if (!test_bit(HCI_RAW, &hdev->flags)) {
684 atomic_set(&hdev->cmd_cnt, 1);
685 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200686 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700687
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300688 ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700689
Andre Guedeseead27d2011-06-30 19:20:55 -0300690 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300691 ret = __hci_request(hdev, hci_le_init_req, 0,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300692 HCI_INIT_TIMEOUT);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300693
Linus Torvalds1da177e2005-04-16 15:20:36 -0700694 clear_bit(HCI_INIT, &hdev->flags);
695 }
696
697 if (!ret) {
698 hci_dev_hold(hdev);
699 set_bit(HCI_UP, &hdev->flags);
700 hci_notify(hdev, HCI_DEV_UP);
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +0300701 if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
702 mgmt_valid_hdev(hdev)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300703 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200704 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300705 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200706 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900707 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700708 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200709 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200710 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400711 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700712
713 skb_queue_purge(&hdev->cmd_q);
714 skb_queue_purge(&hdev->rx_q);
715
716 if (hdev->flush)
717 hdev->flush(hdev);
718
719 if (hdev->sent_cmd) {
720 kfree_skb(hdev->sent_cmd);
721 hdev->sent_cmd = NULL;
722 }
723
724 hdev->close(hdev);
725 hdev->flags = 0;
726 }
727
728done:
729 hci_req_unlock(hdev);
730 hci_dev_put(hdev);
731 return ret;
732}
733
/* Bring the device fully down: stop scanning and pending requests,
 * flush all work items and packet queues, optionally soft-reset the
 * controller, and release the transport via hdev->close().
 * Returns 0, also when the device was already down.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	/* An LE scan work item may be in flight; wait for it to finish */
	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		/* Already down: just stop the command timer and bail out */
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Discoverable mode dies with the device */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		/* HCI_INIT lets the reset request bypass normal queueing */
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Report power-down to mgmt unless auto-off already accounted
	 * for it (HCI_AUTO_OFF still set) or mgmt does not know the dev */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Stale controller state must not survive the next power-up */
	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
821
822int hci_dev_close(__u16 dev)
823{
824 struct hci_dev *hdev;
825 int err;
826
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200827 hdev = hci_dev_get(dev);
828 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700829 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100830
831 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
832 cancel_delayed_work(&hdev->power_off);
833
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100835
Linus Torvalds1da177e2005-04-16 15:20:36 -0700836 hci_dev_put(hdev);
837 return err;
838}
839
/* HCIDEVRESET ioctl: flush all pending traffic and connection state on
 * an up device and, unless it is in raw mode, issue an HCI Reset.
 * Returns 0 (including when the device is down) or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Nothing to reset on a device that is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the flow-control counters to their idle state */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
877
878int hci_dev_reset_stat(__u16 dev)
879{
880 struct hci_dev *hdev;
881 int ret = 0;
882
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200883 hdev = hci_dev_get(dev);
884 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885 return -ENODEV;
886
887 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
888
889 hci_dev_put(hdev);
890
891 return ret;
892}
893
/* Handle the HCISET* family of ioctls: apply one device setting,
 * issuing an HCI request where the controller has to be told, or just
 * updating host-side state.  @arg points to a struct hci_dev_req in
 * userspace.  Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	/* The remaining settings are host-side only: no HCI traffic */
	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* MTU and packet count are packed into the two halfwords
		 * of the 32-bit dev_opt */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
968
/* HCIGETDEVLIST ioctl: copy the id and flags of up to dev_num
 * registered devices into the userspace hci_dev_list_req at @arg.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation below against absurd userspace requests */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Userspace is using the device: abort a pending auto-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Devices not managed via mgmt are marked pairable */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Report only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1015
/* HCIGETDEVINFO ioctl: fill the userspace hci_dev_info at @arg with a
 * snapshot of one device's address, flags, MTUs and statistics.
 * Returns 0 or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace is using the device: abort a pending auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed via mgmt are marked pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Pack transport bus (low nibble) and device type (high nibble) */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1057
1058/* ---- Interface to HCI drivers ---- */
1059
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001060static int hci_rfkill_set_block(void *data, bool blocked)
1061{
1062 struct hci_dev *hdev = data;
1063
1064 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1065
1066 if (!blocked)
1067 return 0;
1068
1069 hci_dev_do_close(hdev);
1070
1071 return 0;
1072}
1073
/* rfkill integration: only the block operation is implemented */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1077
/* Deferred worker (hdev->power_on): open the device and complete the
 * initial setup handshake with the management interface. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-powered devices are turned back off after a grace period
	 * unless something claims them in the meantime */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	/* First successful power-up ends setup: announce the new index */
	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1093
1094static void hci_power_off(struct work_struct *work)
1095{
Johan Hedberg32435532011-11-07 22:16:04 +02001096 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001097 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001098
1099 BT_DBG("%s", hdev->name);
1100
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001101 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001102}
1103
/* Delayed worker (hdev->discov_off): the discoverable timeout expired,
 * so write a scan-enable of SCAN_PAGE only (inquiry scan off, page
 * scan still on) and clear the stored timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1121
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001122int hci_uuids_clear(struct hci_dev *hdev)
1123{
1124 struct list_head *p, *n;
1125
1126 list_for_each_safe(p, n, &hdev->uuids) {
1127 struct bt_uuid *uuid;
1128
1129 uuid = list_entry(p, struct bt_uuid, list);
1130
1131 list_del(p);
1132 kfree(uuid);
1133 }
1134
1135 return 0;
1136}
1137
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001138int hci_link_keys_clear(struct hci_dev *hdev)
1139{
1140 struct list_head *p, *n;
1141
1142 list_for_each_safe(p, n, &hdev->link_keys) {
1143 struct link_key *key;
1144
1145 key = list_entry(p, struct link_key, list);
1146
1147 list_del(p);
1148 kfree(key);
1149 }
1150
1151 return 0;
1152}
1153
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001154int hci_smp_ltks_clear(struct hci_dev *hdev)
1155{
1156 struct smp_ltk *k, *tmp;
1157
1158 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1159 list_del(&k->list);
1160 kfree(k);
1161 }
1162
1163 return 0;
1164}
1165
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001166struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1167{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001168 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001169
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001170 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001171 if (bacmp(bdaddr, &k->bdaddr) == 0)
1172 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001173
1174 return NULL;
1175}
1176
/* Decide whether a newly created link key should be stored
 * persistently.  Returns true when the key may outlive the current
 * connection, false when it must be discarded once the link drops.
 * The auth_type/remote_auth comparisons use the raw authentication
 * requirement values from the pairing exchange. */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1212
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001213struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001214{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001215 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001216
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001217 list_for_each_entry(k, &hdev->long_term_keys, list) {
1218 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001219 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001220 continue;
1221
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001222 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001223 }
1224
1225 return NULL;
1226}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001227
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001228struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001229 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001230{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001231 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001232
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001233 list_for_each_entry(k, &hdev->long_term_keys, list)
1234 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001235 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001236 return k;
1237
1238 return NULL;
1239}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001240
/* Store (or update in place) the BR/EDR link key for @bdaddr.
 * @new_key distinguishes a freshly generated key (reported to mgmt)
 * from a key merely being reloaded.  Returns 0 or -ENOMEM.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key type known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed" key keeps the type of the key it replaced */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection ends */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1293
/* Store (or update in place) an SMP key for @bdaddr/@addr_type.
 * Only STK and LTK key types are accepted; other types are silently
 * ignored.  Freshly distributed LTKs (@new_key) are reported to mgmt.
 * Returns 0 or -ENOMEM.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
		__le16 ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are connection-local; only real LTKs go to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1330
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001331int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1332{
1333 struct link_key *key;
1334
1335 key = hci_find_link_key(hdev, bdaddr);
1336 if (!key)
1337 return -ENOENT;
1338
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001339 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001340
1341 list_del(&key->list);
1342 kfree(key);
1343
1344 return 0;
1345}
1346
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001347int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1348{
1349 struct smp_ltk *k, *tmp;
1350
1351 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1352 if (bacmp(bdaddr, &k->bdaddr))
1353 continue;
1354
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001355 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001356
1357 list_del(&k->list);
1358 kfree(k);
1359 }
1360
1361 return 0;
1362}
1363
Ville Tervo6bd32322011-02-16 16:32:41 +02001364/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001365static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001366{
1367 struct hci_dev *hdev = (void *) arg;
1368
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001369 if (hdev->sent_cmd) {
1370 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1371 u16 opcode = __le16_to_cpu(sent->opcode);
1372
1373 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1374 } else {
1375 BT_ERR("%s command tx timeout", hdev->name);
1376 }
1377
Ville Tervo6bd32322011-02-16 16:32:41 +02001378 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001379 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001380}
1381
Szymon Janc2763eda2011-03-22 13:12:22 +01001382struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001383 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001384{
1385 struct oob_data *data;
1386
1387 list_for_each_entry(data, &hdev->remote_oob_data, list)
1388 if (bacmp(bdaddr, &data->bdaddr) == 0)
1389 return data;
1390
1391 return NULL;
1392}
1393
1394int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1395{
1396 struct oob_data *data;
1397
1398 data = hci_find_remote_oob_data(hdev, bdaddr);
1399 if (!data)
1400 return -ENOENT;
1401
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001402 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001403
1404 list_del(&data->list);
1405 kfree(data);
1406
1407 return 0;
1408}
1409
1410int hci_remote_oob_data_clear(struct hci_dev *hdev)
1411{
1412 struct oob_data *data, *n;
1413
1414 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1415 list_del(&data->list);
1416 kfree(data);
1417 }
1418
1419 return 0;
1420}
1421
1422int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001423 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001424{
1425 struct oob_data *data;
1426
1427 data = hci_find_remote_oob_data(hdev, bdaddr);
1428
1429 if (!data) {
1430 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1431 if (!data)
1432 return -ENOMEM;
1433
1434 bacpy(&data->bdaddr, bdaddr);
1435 list_add(&data->list, &hdev->remote_oob_data);
1436 }
1437
1438 memcpy(data->hash, hash, sizeof(data->hash));
1439 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1440
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001441 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001442
1443 return 0;
1444}
1445
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001446struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001447{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001448 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001449
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001450 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001451 if (bacmp(bdaddr, &b->bdaddr) == 0)
1452 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001453
1454 return NULL;
1455}
1456
1457int hci_blacklist_clear(struct hci_dev *hdev)
1458{
1459 struct list_head *p, *n;
1460
1461 list_for_each_safe(p, n, &hdev->blacklist) {
1462 struct bdaddr_list *b;
1463
1464 b = list_entry(p, struct bdaddr_list, list);
1465
1466 list_del(p);
1467 kfree(b);
1468 }
1469
1470 return 0;
1471}
1472
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001473int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001474{
1475 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001476
1477 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1478 return -EBADF;
1479
Antti Julku5e762442011-08-25 16:48:02 +03001480 if (hci_blacklist_lookup(hdev, bdaddr))
1481 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001482
1483 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001484 if (!entry)
1485 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001486
1487 bacpy(&entry->bdaddr, bdaddr);
1488
1489 list_add(&entry->list, &hdev->blacklist);
1490
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001491 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001492}
1493
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001494int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001495{
1496 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001497
Szymon Janc1ec918c2011-11-16 09:32:21 +01001498 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001499 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001500
1501 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001502 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001503 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001504
1505 list_del(&entry->list);
1506 kfree(entry);
1507
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001508 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001509}
1510
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001511static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1512{
1513 struct le_scan_params *param = (struct le_scan_params *) opt;
1514 struct hci_cp_le_set_scan_param cp;
1515
1516 memset(&cp, 0, sizeof(cp));
1517 cp.type = param->type;
1518 cp.interval = cpu_to_le16(param->interval);
1519 cp.window = cpu_to_le16(param->window);
1520
1521 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1522}
1523
1524static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1525{
1526 struct hci_cp_le_set_scan_enable cp;
1527
1528 memset(&cp, 0, sizeof(cp));
1529 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001530 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001531
1532 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1533}
1534
/* Synchronously start an LE scan: set the scan parameters, enable
 * scanning, and schedule the automatic disable after @timeout ms.
 * Returns 0, -EINPROGRESS if a scan is already running, or the
 * request error. */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* Per-request completion timeout (not the scan duration) */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters first, then the enable command */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Scan runs until le_scan_disable fires after @timeout ms */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1568
/* Stop an ongoing LE scan early.  Returns 0, or -EALREADY when no
 * scan is active. */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	/* Only send the disable command if we managed to cancel the
	 * pending le_scan_disable work; otherwise that work is already
	 * running (or has run) and sends the command itself. */
	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1586
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001587static void le_scan_disable_work(struct work_struct *work)
1588{
1589 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001590 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001591 struct hci_cp_le_set_scan_enable cp;
1592
1593 BT_DBG("%s", hdev->name);
1594
1595 memset(&cp, 0, sizeof(cp));
1596
1597 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1598}
1599
Andre Guedes28b75a82012-02-03 17:48:00 -03001600static void le_scan_work(struct work_struct *work)
1601{
1602 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1603 struct le_scan_params *param = &hdev->le_scan_params;
1604
1605 BT_DBG("%s", hdev->name);
1606
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001607 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1608 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001609}
1610
1611int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001612 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001613{
1614 struct le_scan_params *param = &hdev->le_scan_params;
1615
1616 BT_DBG("%s", hdev->name);
1617
1618 if (work_busy(&hdev->le_scan))
1619 return -EINPROGRESS;
1620
1621 param->type = type;
1622 param->interval = interval;
1623 param->window = window;
1624 param->timeout = timeout;
1625
1626 queue_work(system_long_wq, &hdev->le_scan);
1627
1628 return 0;
1629}
1630
/* Alloc HCI device */
/* Allocate and initialize a new hci_dev with default packet types,
 * locks, lists, work items and queues. Returns NULL on allocation
 * failure. The caller later registers it with hci_register_dev() and
 * releases it with hci_free_dev().
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR defaults */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	/* Sniff-mode interval bounds (in baseband slots) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* RX/TX/command processing and power management work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1684
1685/* Free HCI device */
1686void hci_free_dev(struct hci_dev *hdev)
1687{
1688 skb_queue_purge(&hdev->driver_init);
1689
1690 /* will free via device release */
1691 put_device(&hdev->dev);
1692}
1693EXPORT_SYMBOL(hci_free_dev);
1694
/* Register HCI device */
/* Allocate an index, publish the device on the global list, create its
 * workqueue, sysfs entries and rfkill switch, and schedule power-on.
 *
 * Returns the assigned index (>= 0) on success or a negative errno.
 * On failure everything done so far is unwound in reverse order.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A usable transport driver must provide open/close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Per-device ordered workqueue for RX/TX/command processing */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failure is not fatal, the
	 * device simply comes up without an rfkill switch.
	 */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Unwind: give back the index and unpublish the device */
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1773
/* Unregister HCI device */
/* Tear down a registered device: unpublish it, close it, notify mgmt,
 * remove rfkill/sysfs, flush its state tables, drop the registration
 * reference and finally release its index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Tell concurrent paths (e.g. power work) the device is going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index: hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any half-reassembled frames still buffered per packet type */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal to mgmt if userspace ever saw the device */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Flush persistent per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1829
1830/* Suspend HCI device */
1831int hci_suspend_dev(struct hci_dev *hdev)
1832{
1833 hci_notify(hdev, HCI_DEV_SUSPEND);
1834 return 0;
1835}
1836EXPORT_SYMBOL(hci_suspend_dev);
1837
1838/* Resume HCI device */
1839int hci_resume_dev(struct hci_dev *hdev)
1840{
1841 hci_notify(hdev, HCI_DEV_RESUME);
1842 return 0;
1843}
1844EXPORT_SYMBOL(hci_resume_dev);
1845
Marcel Holtmann76bca882009-11-18 00:40:39 +01001846/* Receive frame from HCI drivers */
1847int hci_recv_frame(struct sk_buff *skb)
1848{
1849 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1850 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001851 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001852 kfree_skb(skb);
1853 return -ENXIO;
1854 }
1855
1856 /* Incomming skb */
1857 bt_cb(skb)->incoming = 1;
1858
1859 /* Time stamp */
1860 __net_timestamp(skb);
1861
Marcel Holtmann76bca882009-11-18 00:40:39 +01001862 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001863 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001864
Marcel Holtmann76bca882009-11-18 00:40:39 +01001865 return 0;
1866}
1867EXPORT_SYMBOL(hci_recv_frame);
1868
/* Incrementally reassemble one HCI packet of @type from a byte stream.
 *
 * Partial packets are kept in hdev->reassembly[index] across calls.
 * Returns the number of input bytes NOT consumed (>= 0) — the caller
 * feeds them back in on the next iteration — or a negative errno
 * (-EILSEQ for invalid type/index, -ENOMEM on allocation failure or
 * when the advertised payload exceeds the buffer).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	/* Only ACL, SCO and event packets can be reassembled */
	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer sized for the
		 * largest possible frame of this type and expect its header
		 * first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than what the current header/payload needs */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once a full header has arrived, learn the payload length
		 * from it and verify it fits in the allocated buffer.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			/* Hand the finished packet to the core; ownership of
			 * the skb transfers to hci_recv_frame().
			 */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1977
Marcel Holtmannef222012007-07-11 06:42:04 +02001978int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1979{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301980 int rem = 0;
1981
Marcel Holtmannef222012007-07-11 06:42:04 +02001982 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1983 return -EILSEQ;
1984
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001985 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001986 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301987 if (rem < 0)
1988 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001989
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301990 data += (count - rem);
1991 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001992 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001993
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301994 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001995}
1996EXPORT_SYMBOL(hci_recv_fragment);
1997
/* Reassembly slot reserved for stream (UART-style) transports where the
 * packet type indicator is in-band as the first byte of each packet.
 */
#define STREAM_REASSEMBLY 0

/* Feed raw stream bytes from a driver into reassembly. Unlike
 * hci_recv_fragment(), the packet type is read from the stream itself.
 *
 * Returns the last reassembly residue (>= 0) or a negative errno.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			/* First byte of a new packet is its type indicator */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continuation: reuse the type recorded on the skb */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Skip consumed bytes and retry with the remainder */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2032
Linus Torvalds1da177e2005-04-16 15:20:36 -07002033/* ---- Interface to upper protocols ---- */
2034
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035int hci_register_cb(struct hci_cb *cb)
2036{
2037 BT_DBG("%p name %s", cb, cb->name);
2038
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002039 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002040 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002041 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002042
2043 return 0;
2044}
2045EXPORT_SYMBOL(hci_register_cb);
2046
2047int hci_unregister_cb(struct hci_cb *cb)
2048{
2049 BT_DBG("%p name %s", cb, cb->name);
2050
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002051 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002052 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002053 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002054
2055 return 0;
2056}
2057EXPORT_SYMBOL(hci_unregister_cb);
2058
2059static int hci_send_frame(struct sk_buff *skb)
2060{
2061 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2062
2063 if (!hdev) {
2064 kfree_skb(skb);
2065 return -ENODEV;
2066 }
2067
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002068 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002069
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002070 /* Time stamp */
2071 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002072
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002073 /* Send copy to monitor */
2074 hci_send_to_monitor(hdev, skb);
2075
2076 if (atomic_read(&hdev->promisc)) {
2077 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002078 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 }
2080
2081 /* Get rid of skb owner, prior to sending to the driver. */
2082 skb_orphan(skb);
2083
2084 return hdev->send(skb);
2085}
2086
/* Send HCI command */
/* Build an HCI command packet (@opcode plus @plen bytes of @param) and
 * queue it on cmd_q for the command work to transmit.
 *
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header: little-endian opcode followed by parameter length */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller initialization */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122
2123/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002124void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125{
2126 struct hci_command_hdr *hdr;
2127
2128 if (!hdev->sent_cmd)
2129 return NULL;
2130
2131 hdr = (void *) hdev->sent_cmd->data;
2132
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002133 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134 return NULL;
2135
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002136 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002137
2138 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2139}
2140
2141/* Send ACL data */
2142static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2143{
2144 struct hci_acl_hdr *hdr;
2145 int len = skb->len;
2146
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002147 skb_push(skb, HCI_ACL_HDR_SIZE);
2148 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002149 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002150 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2151 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152}
2153
/* Add ACL headers to an outgoing skb (and all skbs on its frag_list)
 * and append them to @queue. The first fragment keeps the caller's
 * @flags (ACL_START); continuation fragments are re-flagged ACL_CONT.
 * Fragments are queued atomically so the TX path never sees a partial
 * sequence.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* The head skb's own length excludes frag_list data */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach fragments; each becomes an independent ACL packet */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* All fragments after the first are continuations */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2201
2202void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2203{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002204 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002205
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002206 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002207
2208 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002209
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002210 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002212 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002213}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214
2215/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002216void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217{
2218 struct hci_dev *hdev = conn->hdev;
2219 struct hci_sco_hdr hdr;
2220
2221 BT_DBG("%s len %d", hdev->name, skb->len);
2222
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002223 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 hdr.dlen = skb->len;
2225
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002226 skb_push(skb, HCI_SCO_HDR_SIZE);
2227 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002228 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229
2230 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002231 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002232
Linus Torvalds1da177e2005-04-16 15:20:36 -07002233 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002234 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002235}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236
2237/* ---- HCI TX task (outgoing data) ---- */
2238
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * in-flight packets (fairness), and compute its TX quota from the
 * controller's free buffer count for that link type.
 *
 * Returns the chosen connection (or NULL) and writes its quota —
 * at least 1 when a connection is selected, 0 otherwise.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		/* Only data-bearing states are eligible for scheduling */
		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest unacked packets */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffer count for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE may share the ACL buffer pool (le_mtu == 0) */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split buffers evenly; guarantee at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2299
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002300static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002301{
2302 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002303 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304
Ville Tervobae1f5d92011-02-10 22:38:53 -03002305 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002307 rcu_read_lock();
2308
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002310 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002311 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002312 BT_ERR("%s killing stalled connection %pMR",
2313 hdev->name, &c->dst);
Andrei Emeltchenko7490c6c2012-06-01 16:18:25 +03002314 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315 }
2316 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002317
2318 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002319}
2320
/* Channel-aware scheduler: among all channels on connections of @type,
 * pick the one whose head packet has the highest priority, breaking
 * ties by the owning connection's lowest in-flight count.
 *
 * Returns the chosen channel (or NULL) and writes its TX quota
 * computed from the controller's free buffer count for the link type.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head packet's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, prefer the connection
			 * with the fewest unacked packets.
			 */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free controller buffer count for this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE may share the ACL buffer pool (le_mtu == 0) */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Split buffers evenly; guarantee at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2402
/* Re-level channel priorities after a scheduling round.
 *
 * For every connection of @type: a channel that transmitted this round
 * (chan->sent != 0) merely has its counter reset; a channel that was
 * starved (sent nothing but still has queued data) gets its head skb
 * promoted to HCI_PRIO_MAX - 1 so it wins the next hci_chan_sent()
 * selection and cannot be starved indefinitely.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset its
			 * counter, no promotion needed. */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: bump its head skb just below the
			 * maximum so it is picked next round. */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2452
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002453static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2454{
2455 /* Calculate count of blocks used by this packet */
2456 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2457}
2458
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002459static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461 if (!test_bit(HCI_RAW, &hdev->flags)) {
2462 /* ACL tx timeout must be longer than maximum
2463 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002464 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002465 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002466 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002467 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002468}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002469
/* Transmit queued ACL data using packet-based flow control.
 *
 * Repeatedly asks hci_chan_sent() for the best ACL channel and drains
 * up to its quota of packets, stopping early when the head skb's
 * priority drops below the priority the drain started at.  Each frame
 * sent consumes one acl_cnt buffer credit.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Check for a stalled link before scheduling new traffic */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Leave sniff mode before sending, if requested */
			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance priorities for the next round */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2507
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002508static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002509{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002510 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002511 struct hci_chan *chan;
2512 struct sk_buff *skb;
2513 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002514 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002515
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002516 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002517
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002518 BT_DBG("%s", hdev->name);
2519
2520 if (hdev->dev_type == HCI_AMP)
2521 type = AMP_LINK;
2522 else
2523 type = ACL_LINK;
2524
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002525 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002526 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002527 u32 priority = (skb_peek(&chan->data_q))->priority;
2528 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2529 int blocks;
2530
2531 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002532 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002533
2534 /* Stop if priority has changed */
2535 if (skb->priority < priority)
2536 break;
2537
2538 skb = skb_dequeue(&chan->data_q);
2539
2540 blocks = __get_blocks(hdev, skb);
2541 if (blocks > hdev->block_cnt)
2542 return;
2543
2544 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002545 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002546
2547 hci_send_frame(skb);
2548 hdev->acl_last_tx = jiffies;
2549
2550 hdev->block_cnt -= blocks;
2551 quote -= blocks;
2552
2553 chan->sent += blocks;
2554 chan->conn->sent += blocks;
2555 }
2556 }
2557
2558 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002559 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002560}
2561
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002562static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002563{
2564 BT_DBG("%s", hdev->name);
2565
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002566 /* No ACL link over BR/EDR controller */
2567 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2568 return;
2569
2570 /* No AMP link over AMP controller */
2571 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002572 return;
2573
2574 switch (hdev->flow_ctl_mode) {
2575 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2576 hci_sched_acl_pkt(hdev);
2577 break;
2578
2579 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2580 hci_sched_acl_blk(hdev);
2581 break;
2582 }
2583}
2584
Linus Torvalds1da177e2005-04-16 15:20:36 -07002585/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002586static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587{
2588 struct hci_conn *conn;
2589 struct sk_buff *skb;
2590 int quote;
2591
2592 BT_DBG("%s", hdev->name);
2593
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002594 if (!hci_conn_num(hdev, SCO_LINK))
2595 return;
2596
Linus Torvalds1da177e2005-04-16 15:20:36 -07002597 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2598 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2599 BT_DBG("skb %p len %d", skb, skb->len);
2600 hci_send_frame(skb);
2601
2602 conn->sent++;
2603 if (conn->sent == ~0)
2604 conn->sent = 0;
2605 }
2606 }
2607}
2608
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002609static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002610{
2611 struct hci_conn *conn;
2612 struct sk_buff *skb;
2613 int quote;
2614
2615 BT_DBG("%s", hdev->name);
2616
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002617 if (!hci_conn_num(hdev, ESCO_LINK))
2618 return;
2619
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002620 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2621 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002622 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2623 BT_DBG("skb %p len %d", skb, skb->len);
2624 hci_send_frame(skb);
2625
2626 conn->sent++;
2627 if (conn->sent == ~0)
2628 conn->sent = 0;
2629 }
2630 }
2631}
2632
/* Transmit queued LE data.
 *
 * If the controller has a dedicated LE buffer pool (le_pkts != 0) the
 * le_cnt credits are used; otherwise LE traffic shares the ACL pool.
 * Follows the same priority-quota drain pattern as hci_sched_acl_pkt().
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		/* NOTE(review): magic HZ * 45 here, whereas the ACL path
		 * uses the HCI_ACL_TX_TIMEOUT constant — consider a named
		 * LE timeout constant for consistency. */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Shared-pool fallback: borrow ACL credits when le_pkts == 0 */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance priorities for the next round */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2683
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002684static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002686 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 struct sk_buff *skb;
2688
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002689 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002690 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691
2692 /* Schedule queues and send stuff to HCI driver */
2693
2694 hci_sched_acl(hdev);
2695
2696 hci_sched_sco(hdev);
2697
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002698 hci_sched_esco(hdev);
2699
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002700 hci_sched_le(hdev);
2701
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 /* Send next queued raw (unknown type) packet */
2703 while ((skb = skb_dequeue(&hdev->raw_q)))
2704 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705}
2706
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002707/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708
2709/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002710static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711{
2712 struct hci_acl_hdr *hdr = (void *) skb->data;
2713 struct hci_conn *conn;
2714 __u16 handle, flags;
2715
2716 skb_pull(skb, HCI_ACL_HDR_SIZE);
2717
2718 handle = __le16_to_cpu(hdr->handle);
2719 flags = hci_flags(handle);
2720 handle = hci_handle(handle);
2721
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002722 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002723 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724
2725 hdev->stat.acl_rx++;
2726
2727 hci_dev_lock(hdev);
2728 conn = hci_conn_hash_lookup_handle(hdev, handle);
2729 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002730
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002732 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002733
Johan Hedberg671267b2012-05-12 16:11:50 -03002734 hci_dev_lock(hdev);
2735 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2736 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2737 mgmt_device_connected(hdev, &conn->dst, conn->type,
2738 conn->dst_type, 0, NULL, 0,
2739 conn->dev_class);
2740 hci_dev_unlock(hdev);
2741
Linus Torvalds1da177e2005-04-16 15:20:36 -07002742 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002743 l2cap_recv_acldata(conn, skb, flags);
2744 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002745 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002746 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002747 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002748 }
2749
2750 kfree_skb(skb);
2751}
2752
2753/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002754static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755{
2756 struct hci_sco_hdr *hdr = (void *) skb->data;
2757 struct hci_conn *conn;
2758 __u16 handle;
2759
2760 skb_pull(skb, HCI_SCO_HDR_SIZE);
2761
2762 handle = __le16_to_cpu(hdr->handle);
2763
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002764 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
2766 hdev->stat.sco_rx++;
2767
2768 hci_dev_lock(hdev);
2769 conn = hci_conn_hash_lookup_handle(hdev, handle);
2770 hci_dev_unlock(hdev);
2771
2772 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002774 sco_recv_scodata(conn, skb);
2775 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002777 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002778 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779 }
2780
2781 kfree_skb(skb);
2782}
2783
/* RX work handler: drain the device receive queue.
 *
 * Every packet is copied to the monitor socket (and, in promiscuous
 * mode, to raw sockets), then either discarded (raw mode, or data
 * packets during init) or dispatched by type to the event/ACL/SCO
 * handlers, each of which consumes the skb.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: userspace owns the device, drop everything */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2838
/* Command work handler: transmit the next queued HCI command.
 *
 * Only runs a command when the controller has a free command credit
 * (cmd_cnt).  A clone of the command is kept in hdev->sent_cmd so the
 * completion path can match the reply; if cloning fails the command is
 * pushed back on the queue and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the copy of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is in flight;
			 * otherwise (re)arm the command watchdog. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002870
2871int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2872{
2873 /* General inquiry access code (GIAC) */
2874 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2875 struct hci_cp_inquiry cp;
2876
2877 BT_DBG("%s", hdev->name);
2878
2879 if (test_bit(HCI_INQUIRY, &hdev->flags))
2880 return -EINPROGRESS;
2881
Johan Hedberg46632622012-01-02 16:06:08 +02002882 inquiry_cache_flush(hdev);
2883
Andre Guedes2519a1f2011-11-07 11:45:24 -03002884 memset(&cp, 0, sizeof(cp));
2885 memcpy(&cp.lap, lap, sizeof(cp.lap));
2886 cp.length = length;
2887
2888 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2889}
Andre Guedes023d50492011-11-04 14:16:52 -03002890
2891int hci_cancel_inquiry(struct hci_dev *hdev)
2892{
2893 BT_DBG("%s", hdev->name);
2894
2895 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002896 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002897
2898 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2899}
Andre Guedes31f79562012-04-24 21:02:53 -03002900
2901u8 bdaddr_to_le(u8 bdaddr_type)
2902{
2903 switch (bdaddr_type) {
2904 case BDADDR_LE_PUBLIC:
2905 return ADDR_LE_DEV_PUBLIC;
2906
2907 default:
2908 /* Fallback to LE Random address type */
2909 return ADDR_LE_DEV_RANDOM;
2910 }
2911}