blob: 08994ecc3b6a5e7cc168ee7cd957a38c49d2e579 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Johan Hedbergab81cbf2010-12-15 13:53:18 +020036#define AUTO_OFF_TIMEOUT 2000
37
Marcel Holtmannb78752c2010-08-08 23:06:53 -040038static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020039static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020040static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
Linus Torvalds1da177e2005-04-16 15:20:36 -070042/* HCI device list */
43LIST_HEAD(hci_dev_list);
44DEFINE_RWLOCK(hci_dev_list_lock);
45
46/* HCI callback list */
47LIST_HEAD(hci_cb_list);
48DEFINE_RWLOCK(hci_cb_list_lock);
49
Sasha Levin3df92b32012-05-27 22:36:56 +020050/* HCI ID Numbering */
51static DEFINE_IDA(hci_index_ida);
52
/* ---- HCI notifications ---- */

/* Forward an HCI device event to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
59
60/* ---- HCI requests ---- */
61
Johan Hedberg23bb5762010-12-21 23:01:27 +020062void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070063{
Johan Hedberg23bb5762010-12-21 23:01:27 +020064 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
65
Johan Hedberga5040ef2011-01-10 13:28:59 +020066 /* If this is the init phase check if the completed command matches
67 * the last init command, and if not just return.
68 */
Johan Hedberg75fb0e32012-03-01 21:35:55 +020069 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
70 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
Andrei Emeltchenko1036b892012-03-12 15:59:33 +020071 u16 opcode = __le16_to_cpu(sent->opcode);
Johan Hedberg75fb0e32012-03-01 21:35:55 +020072 struct sk_buff *skb;
73
74 /* Some CSR based controllers generate a spontaneous
75 * reset complete event during init and any pending
76 * command will never be completed. In such a case we
77 * need to resend whatever was the last sent
78 * command.
79 */
80
Andrei Emeltchenko1036b892012-03-12 15:59:33 +020081 if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
Johan Hedberg75fb0e32012-03-01 21:35:55 +020082 return;
83
84 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
85 if (skb) {
86 skb_queue_head(&hdev->cmd_q, skb);
87 queue_work(hdev->workqueue, &hdev->cmd_work);
88 }
89
Johan Hedberg23bb5762010-12-21 23:01:27 +020090 return;
Johan Hedberg75fb0e32012-03-01 21:35:55 +020091 }
Linus Torvalds1da177e2005-04-16 15:20:36 -070092
93 if (hdev->req_status == HCI_REQ_PEND) {
94 hdev->req_result = result;
95 hdev->req_status = HCI_REQ_DONE;
96 wake_up_interruptible(&hdev->req_wait_q);
97 }
98}
99
100static void hci_req_cancel(struct hci_dev *hdev, int err)
101{
102 BT_DBG("%s err 0x%2.2x", hdev->name, err);
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = err;
106 hdev->req_status = HCI_REQ_CANCELED;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111/* Execute request and wait for completion. */
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300112static int __hci_request(struct hci_dev *hdev,
113 void (*req)(struct hci_dev *hdev, unsigned long opt),
114 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700115{
116 DECLARE_WAITQUEUE(wait, current);
117 int err = 0;
118
119 BT_DBG("%s start", hdev->name);
120
121 hdev->req_status = HCI_REQ_PEND;
122
123 add_wait_queue(&hdev->req_wait_q, &wait);
124 set_current_state(TASK_INTERRUPTIBLE);
125
126 req(hdev, opt);
127 schedule_timeout(timeout);
128
129 remove_wait_queue(&hdev->req_wait_q, &wait);
130
131 if (signal_pending(current))
132 return -EINTR;
133
134 switch (hdev->req_status) {
135 case HCI_REQ_DONE:
Joe Perchese1750722011-06-29 18:18:29 -0700136 err = -bt_to_errno(hdev->req_result);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700137 break;
138
139 case HCI_REQ_CANCELED:
140 err = -hdev->req_result;
141 break;
142
143 default:
144 err = -ETIMEDOUT;
145 break;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -0700146 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700147
Johan Hedberga5040ef2011-01-10 13:28:59 +0200148 hdev->req_status = hdev->req_result = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700149
150 BT_DBG("%s end: err %d", hdev->name, err);
151
152 return err;
153}
154
Gustavo Padovan6039aa732012-05-23 04:04:18 -0300155static int hci_request(struct hci_dev *hdev,
156 void (*req)(struct hci_dev *hdev, unsigned long opt),
157 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700158{
159 int ret;
160
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200161 if (!test_bit(HCI_UP, &hdev->flags))
162 return -ENETDOWN;
163
Linus Torvalds1da177e2005-04-16 15:20:36 -0700164 /* Serialize all requests */
165 hci_req_lock(hdev);
166 ret = __hci_request(hdev, req, opt, timeout);
167 hci_req_unlock(hdev);
168
169 return ret;
170}
171
172static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
173{
174 BT_DBG("%s %ld", hdev->name, opt);
175
176 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300177 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200178 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179}
180
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200181static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200183 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800184 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200185 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200187 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
188
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 /* Mandatory initialization */
190
191 /* Reset */
Szymon Janca6c511c2012-05-23 12:35:46 +0200192 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200193 set_bit(HCI_RESET, &hdev->flags);
194 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300195 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196
197 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200200 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200201 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200202
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200204 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200207 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
208
209 /* Read Class of Device */
210 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
211
212 /* Read Local Name */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214
215 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200216 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217
218 /* Optional initialization */
219
220 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200221 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200222 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224 /* Connection accept timeout ~20 secs */
Andrei Emeltchenko82781e62012-05-25 11:38:27 +0300225 param = __constant_cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200226 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200227
228 bacpy(&cp.bdaddr, BDADDR_ANY);
229 cp.delete_all = 1;
230 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700231}
232
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200233static void amp_init(struct hci_dev *hdev)
234{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200235 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
236
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200237 /* Reset */
238 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
239
240 /* Read Local Version */
241 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300242
243 /* Read Local AMP Info */
244 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200245}
246
247static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
248{
249 struct sk_buff *skb;
250
251 BT_DBG("%s %ld", hdev->name, opt);
252
253 /* Driver initialization */
254
255 /* Special commands */
256 while ((skb = skb_dequeue(&hdev->driver_init))) {
257 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
258 skb->dev = (void *) hdev;
259
260 skb_queue_tail(&hdev->cmd_q, skb);
261 queue_work(hdev->workqueue, &hdev->cmd_work);
262 }
263 skb_queue_purge(&hdev->driver_init);
264
265 switch (hdev->dev_type) {
266 case HCI_BREDR:
267 bredr_init(hdev);
268 break;
269
270 case HCI_AMP:
271 amp_init(hdev);
272 break;
273
274 default:
275 BT_ERR("Unknown device type %d", hdev->dev_type);
276 break;
277 }
278
279}
280
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300281static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
282{
283 BT_DBG("%s", hdev->name);
284
285 /* Read LE buffer size */
286 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
287}
288
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 scan = opt;
292
293 BT_DBG("%s %x", hdev->name, scan);
294
295 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200296 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297}
298
299static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 auth = opt;
302
303 BT_DBG("%s %x", hdev->name, auth);
304
305 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
309static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 encrypt = opt;
312
313 BT_DBG("%s %x", hdev->name, encrypt);
314
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200315 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200319static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __le16 policy = cpu_to_le16(opt);
322
Marcel Holtmanna418b892008-11-30 12:17:28 +0100323 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200324
325 /* Default link policy */
326 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
327}
328
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900329/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330 * Device is held on return. */
331struct hci_dev *hci_dev_get(int index)
332{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200333 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334
335 BT_DBG("%d", index);
336
337 if (index < 0)
338 return NULL;
339
340 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200341 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342 if (d->id == index) {
343 hdev = hci_dev_hold(d);
344 break;
345 }
346 }
347 read_unlock(&hci_dev_list_lock);
348 return hdev;
349}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350
351/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200352
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200353bool hci_discovery_active(struct hci_dev *hdev)
354{
355 struct discovery_state *discov = &hdev->discovery;
356
Andre Guedes6fbe1952012-02-03 17:47:58 -0300357 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300358 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300359 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200360 return true;
361
Andre Guedes6fbe1952012-02-03 17:47:58 -0300362 default:
363 return false;
364 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200365}
366
Johan Hedbergff9ef572012-01-04 14:23:45 +0200367void hci_discovery_set_state(struct hci_dev *hdev, int state)
368{
369 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
370
371 if (hdev->discovery.state == state)
372 return;
373
374 switch (state) {
375 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300376 if (hdev->discovery.state != DISCOVERY_STARTING)
377 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200378 break;
379 case DISCOVERY_STARTING:
380 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300381 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200382 mgmt_discovering(hdev, 1);
383 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200384 case DISCOVERY_RESOLVING:
385 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200386 case DISCOVERY_STOPPING:
387 break;
388 }
389
390 hdev->discovery.state = state;
391}
392
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393static void inquiry_cache_flush(struct hci_dev *hdev)
394{
Johan Hedberg30883512012-01-04 14:16:21 +0200395 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200396 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397
Johan Hedberg561aafb2012-01-04 13:31:59 +0200398 list_for_each_entry_safe(p, n, &cache->all, all) {
399 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200400 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200402
403 INIT_LIST_HEAD(&cache->unknown);
404 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405}
406
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300407struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
408 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409{
Johan Hedberg30883512012-01-04 14:16:21 +0200410 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 struct inquiry_entry *e;
412
413 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
414
Johan Hedberg561aafb2012-01-04 13:31:59 +0200415 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200417 return e;
418 }
419
420 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421}
422
Johan Hedberg561aafb2012-01-04 13:31:59 +0200423struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300424 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200425{
Johan Hedberg30883512012-01-04 14:16:21 +0200426 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200427 struct inquiry_entry *e;
428
429 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
430
431 list_for_each_entry(e, &cache->unknown, list) {
432 if (!bacmp(&e->data.bdaddr, bdaddr))
433 return e;
434 }
435
436 return NULL;
437}
438
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200439struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300440 bdaddr_t *bdaddr,
441 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200442{
443 struct discovery_state *cache = &hdev->discovery;
444 struct inquiry_entry *e;
445
446 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
447
448 list_for_each_entry(e, &cache->resolve, list) {
449 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
450 return e;
451 if (!bacmp(&e->data.bdaddr, bdaddr))
452 return e;
453 }
454
455 return NULL;
456}
457
Johan Hedberga3d4e202012-01-09 00:53:02 +0200458void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300459 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200460{
461 struct discovery_state *cache = &hdev->discovery;
462 struct list_head *pos = &cache->resolve;
463 struct inquiry_entry *p;
464
465 list_del(&ie->list);
466
467 list_for_each_entry(p, &cache->resolve, list) {
468 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300469 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200470 break;
471 pos = &p->list;
472 }
473
474 list_add(&ie->list, pos);
475}
476
Johan Hedberg31754052012-01-04 13:39:52 +0200477bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300478 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700479{
Johan Hedberg30883512012-01-04 14:16:21 +0200480 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200481 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700482
483 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
484
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200485 if (ssp)
486 *ssp = data->ssp_mode;
487
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200488 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200489 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200490 if (ie->data.ssp_mode && ssp)
491 *ssp = true;
492
Johan Hedberga3d4e202012-01-09 00:53:02 +0200493 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300494 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +0200495 ie->data.rssi = data->rssi;
496 hci_inquiry_cache_update_resolve(hdev, ie);
497 }
498
Johan Hedberg561aafb2012-01-04 13:31:59 +0200499 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200500 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200501
Johan Hedberg561aafb2012-01-04 13:31:59 +0200502 /* Entry not in the cache. Add new one. */
503 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
504 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200505 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200506
507 list_add(&ie->all, &cache->all);
508
509 if (name_known) {
510 ie->name_state = NAME_KNOWN;
511 } else {
512 ie->name_state = NAME_NOT_KNOWN;
513 list_add(&ie->list, &cache->unknown);
514 }
515
516update:
517 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300518 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +0200519 ie->name_state = NAME_KNOWN;
520 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700521 }
522
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200523 memcpy(&ie->data, data, sizeof(*data));
524 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700525 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200526
527 if (ie->name_state == NAME_NOT_KNOWN)
528 return false;
529
530 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700531}
532
533static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
534{
Johan Hedberg30883512012-01-04 14:16:21 +0200535 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700536 struct inquiry_info *info = (struct inquiry_info *) buf;
537 struct inquiry_entry *e;
538 int copied = 0;
539
Johan Hedberg561aafb2012-01-04 13:31:59 +0200540 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700541 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200542
543 if (copied >= num)
544 break;
545
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 bacpy(&info->bdaddr, &data->bdaddr);
547 info->pscan_rep_mode = data->pscan_rep_mode;
548 info->pscan_period_mode = data->pscan_period_mode;
549 info->pscan_mode = data->pscan_mode;
550 memcpy(info->dev_class, data->dev_class, 3);
551 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200552
Linus Torvalds1da177e2005-04-16 15:20:36 -0700553 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200554 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700555 }
556
557 BT_DBG("cache %p, copied %d", cache, copied);
558 return copied;
559}
560
561static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
562{
563 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
564 struct hci_cp_inquiry cp;
565
566 BT_DBG("%s", hdev->name);
567
568 if (test_bit(HCI_INQUIRY, &hdev->flags))
569 return;
570
571 /* Start Inquiry */
572 memcpy(&cp.lap, &ir->lap, 3);
573 cp.length = ir->length;
574 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200575 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576}
577
578int hci_inquiry(void __user *arg)
579{
580 __u8 __user *ptr = arg;
581 struct hci_inquiry_req ir;
582 struct hci_dev *hdev;
583 int err = 0, do_inquiry = 0, max_rsp;
584 long timeo;
585 __u8 *buf;
586
587 if (copy_from_user(&ir, ptr, sizeof(ir)))
588 return -EFAULT;
589
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200590 hdev = hci_dev_get(ir.dev_id);
591 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700592 return -ENODEV;
593
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300594 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900595 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300596 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700597 inquiry_cache_flush(hdev);
598 do_inquiry = 1;
599 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300600 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601
Marcel Holtmann04837f62006-07-03 10:02:33 +0200602 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200603
604 if (do_inquiry) {
605 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
606 if (err < 0)
607 goto done;
608 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700609
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300610 /* for unlimited number of responses we will use buffer with
611 * 255 entries
612 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700613 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
614
615 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
616 * copy it to the user space.
617 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100618 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200619 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700620 err = -ENOMEM;
621 goto done;
622 }
623
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300624 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700625 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300626 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700627
628 BT_DBG("num_rsp %d", ir.num_rsp);
629
630 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
631 ptr += sizeof(ir);
632 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300633 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700634 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900635 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700636 err = -EFAULT;
637
638 kfree(buf);
639
640done:
641 hci_dev_put(hdev);
642 return err;
643}
644
645/* ---- HCI ioctl helpers ---- */
646
647int hci_dev_open(__u16 dev)
648{
649 struct hci_dev *hdev;
650 int ret = 0;
651
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200652 hdev = hci_dev_get(dev);
653 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654 return -ENODEV;
655
656 BT_DBG("%s %p", hdev->name, hdev);
657
658 hci_req_lock(hdev);
659
Johan Hovold94324962012-03-15 14:48:41 +0100660 if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
661 ret = -ENODEV;
662 goto done;
663 }
664
Marcel Holtmann611b30f2009-06-08 14:41:38 +0200665 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
666 ret = -ERFKILL;
667 goto done;
668 }
669
Linus Torvalds1da177e2005-04-16 15:20:36 -0700670 if (test_bit(HCI_UP, &hdev->flags)) {
671 ret = -EALREADY;
672 goto done;
673 }
674
675 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
676 set_bit(HCI_RAW, &hdev->flags);
677
Andrei Emeltchenko07e3b942011-11-11 17:02:15 +0200678 /* Treat all non BR/EDR controllers as raw devices if
679 enable_hs is not set */
680 if (hdev->dev_type != HCI_BREDR && !enable_hs)
Marcel Holtmann943da252010-02-13 02:28:41 +0100681 set_bit(HCI_RAW, &hdev->flags);
682
Linus Torvalds1da177e2005-04-16 15:20:36 -0700683 if (hdev->open(hdev)) {
684 ret = -EIO;
685 goto done;
686 }
687
688 if (!test_bit(HCI_RAW, &hdev->flags)) {
689 atomic_set(&hdev->cmd_cnt, 1);
690 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberga5040ef2011-01-10 13:28:59 +0200691 hdev->init_last_cmd = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700692
Marcel Holtmann04837f62006-07-03 10:02:33 +0200693 ret = __hci_request(hdev, hci_init_req, 0,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300694 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700695
Andre Guedeseead27d2011-06-30 19:20:55 -0300696 if (lmp_host_le_capable(hdev))
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300697 ret = __hci_request(hdev, hci_le_init_req, 0,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300698 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300699
Linus Torvalds1da177e2005-04-16 15:20:36 -0700700 clear_bit(HCI_INIT, &hdev->flags);
701 }
702
703 if (!ret) {
704 hci_dev_hold(hdev);
705 set_bit(HCI_UP, &hdev->flags);
706 hci_notify(hdev, HCI_DEV_UP);
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200707 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300708 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +0200709 mgmt_powered(hdev, 1);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300710 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +0200711 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900712 } else {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700713 /* Init failed, cleanup */
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200714 flush_work(&hdev->tx_work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200715 flush_work(&hdev->cmd_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400716 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700717
718 skb_queue_purge(&hdev->cmd_q);
719 skb_queue_purge(&hdev->rx_q);
720
721 if (hdev->flush)
722 hdev->flush(hdev);
723
724 if (hdev->sent_cmd) {
725 kfree_skb(hdev->sent_cmd);
726 hdev->sent_cmd = NULL;
727 }
728
729 hdev->close(hdev);
730 hdev->flags = 0;
731 }
732
733done:
734 hci_req_unlock(hdev);
735 hci_dev_put(hdev);
736 return ret;
737}
738
739static int hci_dev_do_close(struct hci_dev *hdev)
740{
741 BT_DBG("%s %p", hdev->name, hdev);
742
Andre Guedes28b75a82012-02-03 17:48:00 -0300743 cancel_work_sync(&hdev->le_scan);
744
Linus Torvalds1da177e2005-04-16 15:20:36 -0700745 hci_req_cancel(hdev, ENODEV);
746 hci_req_lock(hdev);
747
748 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300749 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700750 hci_req_unlock(hdev);
751 return 0;
752 }
753
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200754 /* Flush RX and TX works */
755 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400756 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700757
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200758 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200759 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200760 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +0200761 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200762 }
763
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200764 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200765 cancel_delayed_work(&hdev->service_cache);
766
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300767 cancel_delayed_work_sync(&hdev->le_scan_disable);
768
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300769 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700770 inquiry_cache_flush(hdev);
771 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300772 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773
774 hci_notify(hdev, HCI_DEV_DOWN);
775
776 if (hdev->flush)
777 hdev->flush(hdev);
778
779 /* Reset device */
780 skb_queue_purge(&hdev->cmd_q);
781 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200782 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +0200783 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700784 set_bit(HCI_INIT, &hdev->flags);
Marcel Holtmann04837f62006-07-03 10:02:33 +0200785 __hci_request(hdev, hci_reset_req, 0,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300786 msecs_to_jiffies(250));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700787 clear_bit(HCI_INIT, &hdev->flags);
788 }
789
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200790 /* flush cmd work */
791 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792
793 /* Drop queues */
794 skb_queue_purge(&hdev->rx_q);
795 skb_queue_purge(&hdev->cmd_q);
796 skb_queue_purge(&hdev->raw_q);
797
798 /* Drop last sent command */
799 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300800 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700801 kfree_skb(hdev->sent_cmd);
802 hdev->sent_cmd = NULL;
803 }
804
805 /* After this point our queues are empty
806 * and no tasks are scheduled. */
807 hdev->close(hdev);
808
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100809 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
810 hci_dev_lock(hdev);
811 mgmt_powered(hdev, 0);
812 hci_dev_unlock(hdev);
813 }
Johan Hedberg5add6af2010-12-16 10:00:37 +0200814
Linus Torvalds1da177e2005-04-16 15:20:36 -0700815 /* Clear flags */
816 hdev->flags = 0;
817
Johan Hedberge59fda82012-02-22 18:11:53 +0200818 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +0200819 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +0200820
Linus Torvalds1da177e2005-04-16 15:20:36 -0700821 hci_req_unlock(hdev);
822
823 hci_dev_put(hdev);
824 return 0;
825}
826
827int hci_dev_close(__u16 dev)
828{
829 struct hci_dev *hdev;
830 int err;
831
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200832 hdev = hci_dev_get(dev);
833 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100835
836 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
837 cancel_delayed_work(&hdev->power_off);
838
Linus Torvalds1da177e2005-04-16 15:20:36 -0700839 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100840
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 hci_dev_put(hdev);
842 return err;
843}
844
845int hci_dev_reset(__u16 dev)
846{
847 struct hci_dev *hdev;
848 int ret = 0;
849
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200850 hdev = hci_dev_get(dev);
851 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700852 return -ENODEV;
853
854 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700855
856 if (!test_bit(HCI_UP, &hdev->flags))
857 goto done;
858
859 /* Drop queues */
860 skb_queue_purge(&hdev->rx_q);
861 skb_queue_purge(&hdev->cmd_q);
862
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300863 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700864 inquiry_cache_flush(hdev);
865 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300866 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700867
868 if (hdev->flush)
869 hdev->flush(hdev);
870
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900871 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300872 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700873
874 if (!test_bit(HCI_RAW, &hdev->flags))
Marcel Holtmann04837f62006-07-03 10:02:33 +0200875 ret = __hci_request(hdev, hci_reset_req, 0,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300876 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877
878done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700879 hci_req_unlock(hdev);
880 hci_dev_put(hdev);
881 return ret;
882}
883
884int hci_dev_reset_stat(__u16 dev)
885{
886 struct hci_dev *hdev;
887 int ret = 0;
888
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200889 hdev = hci_dev_get(dev);
890 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 return -ENODEV;
892
893 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
894
895 hci_dev_put(hdev);
896
897 return ret;
898}
899
900int hci_dev_cmd(unsigned int cmd, void __user *arg)
901{
902 struct hci_dev *hdev;
903 struct hci_dev_req dr;
904 int err = 0;
905
906 if (copy_from_user(&dr, arg, sizeof(dr)))
907 return -EFAULT;
908
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200909 hdev = hci_dev_get(dr.dev_id);
910 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700911 return -ENODEV;
912
913 switch (cmd) {
914 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200915 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300916 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917 break;
918
919 case HCISETENCRYPT:
920 if (!lmp_encrypt_capable(hdev)) {
921 err = -EOPNOTSUPP;
922 break;
923 }
924
925 if (!test_bit(HCI_AUTH, &hdev->flags)) {
926 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200927 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300928 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929 if (err)
930 break;
931 }
932
Marcel Holtmann04837f62006-07-03 10:02:33 +0200933 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300934 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700935 break;
936
937 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200938 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300939 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 break;
941
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200942 case HCISETLINKPOL:
943 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300944 msecs_to_jiffies(HCI_INIT_TIMEOUT));
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200945 break;
946
947 case HCISETLINKMODE:
948 hdev->link_mode = ((__u16) dr.dev_opt) &
949 (HCI_LM_MASTER | HCI_LM_ACCEPT);
950 break;
951
Linus Torvalds1da177e2005-04-16 15:20:36 -0700952 case HCISETPTYPE:
953 hdev->pkt_type = (__u16) dr.dev_opt;
954 break;
955
Linus Torvalds1da177e2005-04-16 15:20:36 -0700956 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200957 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
958 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700959 break;
960
961 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200962 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
963 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700964 break;
965
966 default:
967 err = -EINVAL;
968 break;
969 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200970
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971 hci_dev_put(hdev);
972 return err;
973}
974
975int hci_get_dev_list(void __user *arg)
976{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200977 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 struct hci_dev_list_req *dl;
979 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700980 int n = 0, size, err;
981 __u16 dev_num;
982
983 if (get_user(dev_num, (__u16 __user *) arg))
984 return -EFAULT;
985
986 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
987 return -EINVAL;
988
989 size = sizeof(*dl) + dev_num * sizeof(*dr);
990
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200991 dl = kzalloc(size, GFP_KERNEL);
992 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993 return -ENOMEM;
994
995 dr = dl->dev_req;
996
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -0200997 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200998 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200999 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001000 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001001
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001002 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1003 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001004
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005 (dr + n)->dev_id = hdev->id;
1006 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001007
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 if (++n >= dev_num)
1009 break;
1010 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001011 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001012
1013 dl->dev_num = n;
1014 size = sizeof(*dl) + n * sizeof(*dr);
1015
1016 err = copy_to_user(arg, dl, size);
1017 kfree(dl);
1018
1019 return err ? -EFAULT : 0;
1020}
1021
1022int hci_get_dev_info(void __user *arg)
1023{
1024 struct hci_dev *hdev;
1025 struct hci_dev_info di;
1026 int err = 0;
1027
1028 if (copy_from_user(&di, arg, sizeof(di)))
1029 return -EFAULT;
1030
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001031 hdev = hci_dev_get(di.dev_id);
1032 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001033 return -ENODEV;
1034
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001035 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001036 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001037
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001038 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1039 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001040
Linus Torvalds1da177e2005-04-16 15:20:36 -07001041 strcpy(di.name, hdev->name);
1042 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001043 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001044 di.flags = hdev->flags;
1045 di.pkt_type = hdev->pkt_type;
1046 di.acl_mtu = hdev->acl_mtu;
1047 di.acl_pkts = hdev->acl_pkts;
1048 di.sco_mtu = hdev->sco_mtu;
1049 di.sco_pkts = hdev->sco_pkts;
1050 di.link_policy = hdev->link_policy;
1051 di.link_mode = hdev->link_mode;
1052
1053 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1054 memcpy(&di.features, &hdev->features, sizeof(di.features));
1055
1056 if (copy_to_user(arg, &di, sizeof(di)))
1057 err = -EFAULT;
1058
1059 hci_dev_put(hdev);
1060
1061 return err;
1062}
1063
1064/* ---- Interface to HCI drivers ---- */
1065
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001066static int hci_rfkill_set_block(void *data, bool blocked)
1067{
1068 struct hci_dev *hdev = data;
1069
1070 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1071
1072 if (!blocked)
1073 return 0;
1074
1075 hci_dev_do_close(hdev);
1076
1077 return 0;
1078}
1079
/* Operations table handed to rfkill_alloc(); only blocking is handled. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1083
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001084static void hci_power_on(struct work_struct *work)
1085{
1086 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1087
1088 BT_DBG("%s", hdev->name);
1089
1090 if (hci_dev_open(hdev->id) < 0)
1091 return;
1092
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001093 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Gustavo F. Padovan80b7ab32011-12-17 14:52:27 -02001094 schedule_delayed_work(&hdev->power_off,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001095 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001096
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001097 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001098 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001099}
1100
1101static void hci_power_off(struct work_struct *work)
1102{
Johan Hedberg32435532011-11-07 22:16:04 +02001103 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001104 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001105
1106 BT_DBG("%s", hdev->name);
1107
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001108 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001109}
1110
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001111static void hci_discov_off(struct work_struct *work)
1112{
1113 struct hci_dev *hdev;
1114 u8 scan = SCAN_PAGE;
1115
1116 hdev = container_of(work, struct hci_dev, discov_off.work);
1117
1118 BT_DBG("%s", hdev->name);
1119
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001120 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001121
1122 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1123
1124 hdev->discov_timeout = 0;
1125
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001126 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001127}
1128
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001129int hci_uuids_clear(struct hci_dev *hdev)
1130{
1131 struct list_head *p, *n;
1132
1133 list_for_each_safe(p, n, &hdev->uuids) {
1134 struct bt_uuid *uuid;
1135
1136 uuid = list_entry(p, struct bt_uuid, list);
1137
1138 list_del(p);
1139 kfree(uuid);
1140 }
1141
1142 return 0;
1143}
1144
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001145int hci_link_keys_clear(struct hci_dev *hdev)
1146{
1147 struct list_head *p, *n;
1148
1149 list_for_each_safe(p, n, &hdev->link_keys) {
1150 struct link_key *key;
1151
1152 key = list_entry(p, struct link_key, list);
1153
1154 list_del(p);
1155 kfree(key);
1156 }
1157
1158 return 0;
1159}
1160
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001161int hci_smp_ltks_clear(struct hci_dev *hdev)
1162{
1163 struct smp_ltk *k, *tmp;
1164
1165 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1166 list_del(&k->list);
1167 kfree(k);
1168 }
1169
1170 return 0;
1171}
1172
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001173struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1174{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001175 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001176
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001177 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001178 if (bacmp(bdaddr, &k->bdaddr) == 0)
1179 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001180
1181 return NULL;
1182}
1183
/* Decide whether a freshly created link key should be stored
 * persistently.
 *
 * @conn may be NULL (security mode 3 style pairing with no connection
 * context).  @old_key_type is 0xff when no previous key existed.
 * Returns true if the key should be kept across power cycles, false
 * if it must be treated as temporary.  The checks below are ordered
 * from strongest to weakest evidence and must stay in this sequence.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1219
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001220struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001221{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001222 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001223
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001224 list_for_each_entry(k, &hdev->long_term_keys, list) {
1225 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001226 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001227 continue;
1228
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001229 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001230 }
1231
1232 return NULL;
1233}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001234
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001236 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001237{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001238 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001239
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001240 list_for_each_entry(k, &hdev->long_term_keys, list)
1241 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001242 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001243 return k;
1244
1245 return NULL;
1246}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001247
/* Store (or update in place) the BR/EDR link key for @bdaddr.
 *
 * @conn may be NULL.  @new_key distinguishes a newly generated key
 * from a re-load of an existing one; only new keys are reported to
 * the management interface.  Returns 0 on success or -ENOMEM if a new
 * list entry could not be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry when present; 0xff marks "no previous
	 * key" for the persistence decision below. */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1300
/* Store (or update in place) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK types are accepted; anything else is silently
 * ignored (returns 0).  An existing entry for the same address is
 * overwritten rather than duplicated.  Only genuinely new LTKs are
 * reported to the management interface.  Returns 0 on success or
 * -ENOMEM if a new list entry could not be allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse the entry for this address if one already exists */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are short-lived; only LTKs are announced over mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1337
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001338int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1339{
1340 struct link_key *key;
1341
1342 key = hci_find_link_key(hdev, bdaddr);
1343 if (!key)
1344 return -ENOENT;
1345
1346 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1347
1348 list_del(&key->list);
1349 kfree(key);
1350
1351 return 0;
1352}
1353
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001354int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1355{
1356 struct smp_ltk *k, *tmp;
1357
1358 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1359 if (bacmp(bdaddr, &k->bdaddr))
1360 continue;
1361
1362 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1363
1364 list_del(&k->list);
1365 kfree(k);
1366 }
1367
1368 return 0;
1369}
1370
Ville Tervo6bd32322011-02-16 16:32:41 +02001371/* HCI command timer function */
1372static void hci_cmd_timer(unsigned long arg)
1373{
1374 struct hci_dev *hdev = (void *) arg;
1375
1376 BT_ERR("%s command tx timeout", hdev->name);
1377 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001378 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001379}
1380
Szymon Janc2763eda2011-03-22 13:12:22 +01001381struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001382 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001383{
1384 struct oob_data *data;
1385
1386 list_for_each_entry(data, &hdev->remote_oob_data, list)
1387 if (bacmp(bdaddr, &data->bdaddr) == 0)
1388 return data;
1389
1390 return NULL;
1391}
1392
1393int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1394{
1395 struct oob_data *data;
1396
1397 data = hci_find_remote_oob_data(hdev, bdaddr);
1398 if (!data)
1399 return -ENOENT;
1400
1401 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1402
1403 list_del(&data->list);
1404 kfree(data);
1405
1406 return 0;
1407}
1408
1409int hci_remote_oob_data_clear(struct hci_dev *hdev)
1410{
1411 struct oob_data *data, *n;
1412
1413 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1414 list_del(&data->list);
1415 kfree(data);
1416 }
1417
1418 return 0;
1419}
1420
1421int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001422 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001423{
1424 struct oob_data *data;
1425
1426 data = hci_find_remote_oob_data(hdev, bdaddr);
1427
1428 if (!data) {
1429 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1430 if (!data)
1431 return -ENOMEM;
1432
1433 bacpy(&data->bdaddr, bdaddr);
1434 list_add(&data->list, &hdev->remote_oob_data);
1435 }
1436
1437 memcpy(data->hash, hash, sizeof(data->hash));
1438 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1439
1440 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1441
1442 return 0;
1443}
1444
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001445struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001446{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001447 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001448
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001449 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001450 if (bacmp(bdaddr, &b->bdaddr) == 0)
1451 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001452
1453 return NULL;
1454}
1455
1456int hci_blacklist_clear(struct hci_dev *hdev)
1457{
1458 struct list_head *p, *n;
1459
1460 list_for_each_safe(p, n, &hdev->blacklist) {
1461 struct bdaddr_list *b;
1462
1463 b = list_entry(p, struct bdaddr_list, list);
1464
1465 list_del(p);
1466 kfree(b);
1467 }
1468
1469 return 0;
1470}
1471
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001472int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001473{
1474 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001475
1476 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1477 return -EBADF;
1478
Antti Julku5e762442011-08-25 16:48:02 +03001479 if (hci_blacklist_lookup(hdev, bdaddr))
1480 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001481
1482 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001483 if (!entry)
1484 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001485
1486 bacpy(&entry->bdaddr, bdaddr);
1487
1488 list_add(&entry->list, &hdev->blacklist);
1489
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001490 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001491}
1492
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001493int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001494{
1495 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001496
Szymon Janc1ec918c2011-11-16 09:32:21 +01001497 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001498 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001499
1500 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001501 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001502 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001503
1504 list_del(&entry->list);
1505 kfree(entry);
1506
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001507 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001508}
1509
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001510static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1511{
1512 struct le_scan_params *param = (struct le_scan_params *) opt;
1513 struct hci_cp_le_set_scan_param cp;
1514
1515 memset(&cp, 0, sizeof(cp));
1516 cp.type = param->type;
1517 cp.interval = cpu_to_le16(param->interval);
1518 cp.window = cpu_to_le16(param->window);
1519
1520 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1521}
1522
1523static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1524{
1525 struct hci_cp_le_set_scan_enable cp;
1526
1527 memset(&cp, 0, sizeof(cp));
1528 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001529 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001530
1531 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1532}
1533
/* Start an LE scan synchronously.
 *
 * Issues two chained HCI requests under the request lock — first the
 * scan parameters, then scan enable — each with a 3 second request
 * timeout.  On success, arms the delayed work that turns scanning off
 * again after @timeout ms.
 *
 * Returns 0 on success, -EINPROGRESS if a scan is already running, or
 * the error from either request.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be set before scanning can be enabled */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1567
Andre Guedes7dbfac12012-03-15 16:52:07 -03001568int hci_cancel_le_scan(struct hci_dev *hdev)
1569{
1570 BT_DBG("%s", hdev->name);
1571
1572 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1573 return -EALREADY;
1574
1575 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1576 struct hci_cp_le_set_scan_enable cp;
1577
1578 /* Send HCI command to disable LE Scan */
1579 memset(&cp, 0, sizeof(cp));
1580 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1581 }
1582
1583 return 0;
1584}
1585
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001586static void le_scan_disable_work(struct work_struct *work)
1587{
1588 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001589 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001590 struct hci_cp_le_set_scan_enable cp;
1591
1592 BT_DBG("%s", hdev->name);
1593
1594 memset(&cp, 0, sizeof(cp));
1595
1596 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1597}
1598
Andre Guedes28b75a82012-02-03 17:48:00 -03001599static void le_scan_work(struct work_struct *work)
1600{
1601 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1602 struct le_scan_params *param = &hdev->le_scan_params;
1603
1604 BT_DBG("%s", hdev->name);
1605
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001606 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1607 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001608}
1609
/* Start an LE scan asynchronously.
 *
 * The scan parameters are stored in hdev->le_scan_params and the
 * actual scan (le_scan_work() -> hci_do_le_scan()) is deferred to
 * system_long_wq.
 *
 * Returns 0 on success or -EINPROGRESS if a scan work item is already
 * running or queued.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1629
David Herrmann9be0dab2012-04-22 14:39:57 +02001630/* Alloc HCI device */
1631struct hci_dev *hci_alloc_dev(void)
1632{
1633 struct hci_dev *hdev;
1634
1635 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1636 if (!hdev)
1637 return NULL;
1638
David Herrmannb1b813d2012-04-22 14:39:58 +02001639 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1640 hdev->esco_type = (ESCO_HV1);
1641 hdev->link_mode = (HCI_LM_ACCEPT);
1642 hdev->io_capability = 0x03; /* No Input No Output */
1643
David Herrmannb1b813d2012-04-22 14:39:58 +02001644 hdev->sniff_max_interval = 800;
1645 hdev->sniff_min_interval = 80;
1646
1647 mutex_init(&hdev->lock);
1648 mutex_init(&hdev->req_lock);
1649
1650 INIT_LIST_HEAD(&hdev->mgmt_pending);
1651 INIT_LIST_HEAD(&hdev->blacklist);
1652 INIT_LIST_HEAD(&hdev->uuids);
1653 INIT_LIST_HEAD(&hdev->link_keys);
1654 INIT_LIST_HEAD(&hdev->long_term_keys);
1655 INIT_LIST_HEAD(&hdev->remote_oob_data);
David Herrmannb1b813d2012-04-22 14:39:58 +02001656
1657 INIT_WORK(&hdev->rx_work, hci_rx_work);
1658 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1659 INIT_WORK(&hdev->tx_work, hci_tx_work);
1660 INIT_WORK(&hdev->power_on, hci_power_on);
1661 INIT_WORK(&hdev->le_scan, le_scan_work);
1662
David Herrmannb1b813d2012-04-22 14:39:58 +02001663 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1664 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1665 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1666
David Herrmann9be0dab2012-04-22 14:39:57 +02001667 skb_queue_head_init(&hdev->driver_init);
David Herrmannb1b813d2012-04-22 14:39:58 +02001668 skb_queue_head_init(&hdev->rx_q);
1669 skb_queue_head_init(&hdev->cmd_q);
1670 skb_queue_head_init(&hdev->raw_q);
1671
1672 init_waitqueue_head(&hdev->req_wait_q);
1673
1674 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1675
David Herrmannb1b813d2012-04-22 14:39:58 +02001676 hci_init_sysfs(hdev);
1677 discovery_init(hdev);
1678 hci_conn_hash_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02001679
1680 return hdev;
1681}
1682EXPORT_SYMBOL(hci_alloc_dev);
1683
/* Free HCI device.
 *
 * Drops the caller's reference; the struct itself is freed by the
 * driver-model release callback once the last reference is gone. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1693
/* Register HCI device.
 *
 * Allocates a device index (AMP controllers never get index 0, so the
 * index doubles as the AMP controller ID), links the device into the
 * global list, creates its workqueue and sysfs entries, and schedules
 * the initial power-on.
 *
 * Returns the assigned index (>= 0) on success or a negative errno.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is non-fatal: the device simply
	 * has no rfkill switch. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Unwind in reverse order: index, then list membership */
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1769
/* Unregister HCI device.
 *
 * Tears down everything hci_register_dev() set up: removes the device
 * from the global list, closes it, frees reassembly buffers, informs
 * mgmt, removes rfkill and sysfs entries, clears all stored keys and
 * finally releases the device index. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only tell mgmt about devices that completed setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1825
/* Suspend HCI device.
 *
 * Only broadcasts the suspend event via hci_notify(); the driver is
 * responsible for the actual power transition.  Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1833
/* Resume HCI device.
 *
 * Counterpart of hci_suspend_dev(): only broadcasts the resume event
 * via hci_notify().  Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1841
/* Receive frame from HCI drivers.
 *
 * Drops the frame with -ENXIO unless the device is up or initializing.
 * Otherwise marks it as inbound, timestamps it and queues it for the
 * RX work item on the device's workqueue. */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1864
/* Reassemble a (possibly partial) HCI packet from a driver byte stream.
 *
 * @type:  HCI packet type (ACL, SCO or event)
 * @data:  raw bytes from the driver
 * @count: number of bytes available in @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * An in-progress packet is kept in hdev->reassembly[index]; once a
 * full frame is assembled it is handed to hci_recv_frame() and the
 * slot is cleared.
 *
 * Returns the number of bytes of @data left unconsumed (>= 0), or a
 * negative errno (-EILSEQ on bad type/index, -ENOMEM on allocation
 * failure or oversized payload).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer sized for the
		 * largest possible frame of this type and expect the
		 * fixed-size header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy at most the number of bytes still expected */
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and bail out if it exceeds the buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1973
Marcel Holtmannef222012007-07-11 06:42:04 +02001974int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1975{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301976 int rem = 0;
1977
Marcel Holtmannef222012007-07-11 06:42:04 +02001978 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1979 return -EILSEQ;
1980
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001981 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001982 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301983 if (rem < 0)
1984 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001985
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301986 data += (count - rem);
1987 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001988 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001989
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301990 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001991}
1992EXPORT_SYMBOL(hci_recv_fragment);
1993
#define STREAM_REASSEMBLY 0

/* Feed a raw UART-style byte stream (packet-type indicator byte
 * followed by the packet) into the stream reassembly slot.
 *
 * When no packet is in progress the first byte selects the type;
 * otherwise the type of the in-progress skb is reused.
 *
 * Returns the number of unconsumed bytes or a negative errno. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2028
Linus Torvalds1da177e2005-04-16 15:20:36 -07002029/* ---- Interface to upper protocols ---- */
2030
/* Register an upper-protocol callback set on the global list.
 * Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2042
/* Remove an upper-protocol callback set from the global list.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2054
/* Hand one outbound frame to the driver.
 *
 * Timestamps the skb, mirrors it to the monitor channel (and to raw
 * sockets when in promiscuous mode), then passes ownership to the
 * driver's send callback.  Consumes the skb in all cases. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2082
/* Send HCI command.
 *
 * Builds a command skb (header + @plen bytes of @param) and queues it
 * on the command queue; the command work item performs the actual
 * transmission.  During init the opcode is recorded in
 * init_last_cmd.  Returns 0 or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118
2119/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002120void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121{
2122 struct hci_command_hdr *hdr;
2123
2124 if (!hdev->sent_cmd)
2125 return NULL;
2126
2127 hdr = (void *) hdev->sent_cmd->data;
2128
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002129 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 return NULL;
2131
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002132 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002133
2134 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2135}
2136
/* Send ACL data */

/* Prepend an ACL header (handle + packet-boundary flags, data length)
 * to an outbound skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
2149
/* Queue an ACL packet (and its fragment list, if any) on @queue.
 *
 * The head skb keeps the caller's boundary @flags; every fragment from
 * the frag_list is re-marked as a continuation (ACL_CONT) and all
 * fragments are appended under the queue lock so they stay contiguous. */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2196
/* Send ACL data on a channel: queue it on the channel's data queue and
 * kick the TX work item. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002210
/* Send SCO data.
 *
 * Prepends a SCO header to the skb, queues it on the connection's
 * data queue and kicks the TX work item. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232
2233/* ---- HCI TX task (outgoing data) ---- */
2234
/* HCI Connection scheduler */

/* Pick the connection of @type with pending data that has the fewest
 * in-flight packets (fair scheduling), and compute its TX quota from
 * the controller's free buffer count.
 *
 * *quote is set to the number of packets the caller may send (at
 * least 1 when a connection is returned, 0 otherwise). */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* LE shares ACL buffers when it has no own MTU */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2295
/* TX timeout handler: disconnect every connection of @type that still
 * has unacknowledged packets, since the controller appears stalled. */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2316
/* Channel-level scheduler: among all channels on connections of @type,
 * pick the one whose head skb has the highest priority; ties are
 * broken by the fewest in-flight packets on the owning connection.
 *
 * *quote receives the TX quota derived from the controller's free
 * buffer count (at least 1 when a channel is returned). */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found - restart fairness count */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE shares ACL buffers when it has no own MTU */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2395
/* Anti-starvation pass: for every connection of @type, reset the
 * per-channel sent counter of channels that transmitted in the last
 * round, and promote the head skb of channels that did not (and still
 * have data queued) to just below HCI_PRIO_MAX so they get scheduled
 * next time. */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send last round - just reset */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type seen - stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2445
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002446static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2447{
2448 /* Calculate count of blocks used by this packet */
2449 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2450}
2451
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002452static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002454 if (!test_bit(HCI_RAW, &hdev->flags)) {
2455 /* ACL tx timeout must be longer than maximum
2456 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002457 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002458 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002459 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002460 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002461}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002463static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002464{
2465 unsigned int cnt = hdev->acl_cnt;
2466 struct hci_chan *chan;
2467 struct sk_buff *skb;
2468 int quote;
2469
2470 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002471
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002472 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002473 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002474 u32 priority = (skb_peek(&chan->data_q))->priority;
2475 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002476 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002477 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002478
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002479 /* Stop if priority has changed */
2480 if (skb->priority < priority)
2481 break;
2482
2483 skb = skb_dequeue(&chan->data_q);
2484
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002485 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002486 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002487
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 hci_send_frame(skb);
2489 hdev->acl_last_tx = jiffies;
2490
2491 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002492 chan->sent++;
2493 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 }
2495 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002496
2497 if (cnt != hdev->acl_cnt)
2498 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002499}
2500
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002501static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002502{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002503 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002504 struct hci_chan *chan;
2505 struct sk_buff *skb;
2506 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002507
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002508 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002509
2510 while (hdev->block_cnt > 0 &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002511 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002512 u32 priority = (skb_peek(&chan->data_q))->priority;
2513 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2514 int blocks;
2515
2516 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002517 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002518
2519 /* Stop if priority has changed */
2520 if (skb->priority < priority)
2521 break;
2522
2523 skb = skb_dequeue(&chan->data_q);
2524
2525 blocks = __get_blocks(hdev, skb);
2526 if (blocks > hdev->block_cnt)
2527 return;
2528
2529 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002530 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002531
2532 hci_send_frame(skb);
2533 hdev->acl_last_tx = jiffies;
2534
2535 hdev->block_cnt -= blocks;
2536 quote -= blocks;
2537
2538 chan->sent += blocks;
2539 chan->conn->sent += blocks;
2540 }
2541 }
2542
2543 if (cnt != hdev->block_cnt)
2544 hci_prio_recalculate(hdev, ACL_LINK);
2545}
2546
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002547static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002548{
2549 BT_DBG("%s", hdev->name);
2550
2551 if (!hci_conn_num(hdev, ACL_LINK))
2552 return;
2553
2554 switch (hdev->flow_ctl_mode) {
2555 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2556 hci_sched_acl_pkt(hdev);
2557 break;
2558
2559 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2560 hci_sched_acl_blk(hdev);
2561 break;
2562 }
2563}
2564
Linus Torvalds1da177e2005-04-16 15:20:36 -07002565/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002566static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567{
2568 struct hci_conn *conn;
2569 struct sk_buff *skb;
2570 int quote;
2571
2572 BT_DBG("%s", hdev->name);
2573
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002574 if (!hci_conn_num(hdev, SCO_LINK))
2575 return;
2576
Linus Torvalds1da177e2005-04-16 15:20:36 -07002577 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2578 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2579 BT_DBG("skb %p len %d", skb, skb->len);
2580 hci_send_frame(skb);
2581
2582 conn->sent++;
2583 if (conn->sent == ~0)
2584 conn->sent = 0;
2585 }
2586 }
2587}
2588
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002589static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002590{
2591 struct hci_conn *conn;
2592 struct sk_buff *skb;
2593 int quote;
2594
2595 BT_DBG("%s", hdev->name);
2596
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002597 if (!hci_conn_num(hdev, ESCO_LINK))
2598 return;
2599
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002600 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2601 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002602 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2603 BT_DBG("skb %p len %d", skb, skb->len);
2604 hci_send_frame(skb);
2605
2606 conn->sent++;
2607 if (conn->sent == ~0)
2608 conn->sent = 0;
2609 }
2610 }
2611}
2612
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002613static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002614{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002615 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002616 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002617 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002618
2619 BT_DBG("%s", hdev->name);
2620
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002621 if (!hci_conn_num(hdev, LE_LINK))
2622 return;
2623
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002624 if (!test_bit(HCI_RAW, &hdev->flags)) {
2625 /* LE tx timeout must be longer than maximum
2626 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03002627 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002628 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002629 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002630 }
2631
2632 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002633 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002634 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002635 u32 priority = (skb_peek(&chan->data_q))->priority;
2636 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002637 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002638 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002639
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002640 /* Stop if priority has changed */
2641 if (skb->priority < priority)
2642 break;
2643
2644 skb = skb_dequeue(&chan->data_q);
2645
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002646 hci_send_frame(skb);
2647 hdev->le_last_tx = jiffies;
2648
2649 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002650 chan->sent++;
2651 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002652 }
2653 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002654
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002655 if (hdev->le_pkts)
2656 hdev->le_cnt = cnt;
2657 else
2658 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002659
2660 if (cnt != tmp)
2661 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002662}
2663
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002664static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002665{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002666 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 struct sk_buff *skb;
2668
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002669 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002670 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002671
2672 /* Schedule queues and send stuff to HCI driver */
2673
2674 hci_sched_acl(hdev);
2675
2676 hci_sched_sco(hdev);
2677
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002678 hci_sched_esco(hdev);
2679
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002680 hci_sched_le(hdev);
2681
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 /* Send next queued raw (unknown type) packet */
2683 while ((skb = skb_dequeue(&hdev->raw_q)))
2684 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685}
2686
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002687/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688
/* ACL data packet */
/* Receive path for one inbound ACL frame: strip the HCI ACL header, split
 * the handle field into connection handle and packet-boundary/broadcast
 * flags, look up the connection and hand the payload to L2CAP. Frames for
 * unknown handles are logged and dropped. Consumes @skb on the drop path;
 * otherwise ownership passes to l2cap_recv_acldata().
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field carries both the handle and the flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	/* Lookup only under the dev lock; conn is used after unlock */
	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* Notify mgmt of the connection exactly once (the bit is
		 * test-and-set, so repeats are filtered). */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2732
2733/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002734static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735{
2736 struct hci_sco_hdr *hdr = (void *) skb->data;
2737 struct hci_conn *conn;
2738 __u16 handle;
2739
2740 skb_pull(skb, HCI_SCO_HDR_SIZE);
2741
2742 handle = __le16_to_cpu(hdr->handle);
2743
2744 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2745
2746 hdev->stat.sco_rx++;
2747
2748 hci_dev_lock(hdev);
2749 conn = hci_conn_hash_lookup_handle(hdev, handle);
2750 hci_dev_unlock(hdev);
2751
2752 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002753 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002754 sco_recv_scodata(conn, skb);
2755 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002756 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002757 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002758 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759 }
2760
2761 kfree_skb(skb);
2762}
2763
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002764static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002766 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 struct sk_buff *skb;
2768
2769 BT_DBG("%s", hdev->name);
2770
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002772 /* Send copy to monitor */
2773 hci_send_to_monitor(hdev, skb);
2774
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775 if (atomic_read(&hdev->promisc)) {
2776 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002777 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002778 }
2779
2780 if (test_bit(HCI_RAW, &hdev->flags)) {
2781 kfree_skb(skb);
2782 continue;
2783 }
2784
2785 if (test_bit(HCI_INIT, &hdev->flags)) {
2786 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002787 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002788 case HCI_ACLDATA_PKT:
2789 case HCI_SCODATA_PKT:
2790 kfree_skb(skb);
2791 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002792 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 }
2794
2795 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002796 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002798 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799 hci_event_packet(hdev, skb);
2800 break;
2801
2802 case HCI_ACLDATA_PKT:
2803 BT_DBG("%s ACL data packet", hdev->name);
2804 hci_acldata_packet(hdev, skb);
2805 break;
2806
2807 case HCI_SCODATA_PKT:
2808 BT_DBG("%s SCO data packet", hdev->name);
2809 hci_scodata_packet(hdev, skb);
2810 break;
2811
2812 default:
2813 kfree_skb(skb);
2814 break;
2815 }
2816 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817}
2818
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002819static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002821 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822 struct sk_buff *skb;
2823
2824 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2825
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002827 if (atomic_read(&hdev->cmd_cnt)) {
2828 skb = skb_dequeue(&hdev->cmd_q);
2829 if (!skb)
2830 return;
2831
Wei Yongjun7585b972009-02-25 18:29:52 +08002832 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002834 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2835 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 atomic_dec(&hdev->cmd_cnt);
2837 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002838 if (test_bit(HCI_RESET, &hdev->flags))
2839 del_timer(&hdev->cmd_timer);
2840 else
2841 mod_timer(&hdev->cmd_timer,
Ville Tervo6bd32322011-02-16 16:32:41 +02002842 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843 } else {
2844 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002845 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846 }
2847 }
2848}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002849
2850int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2851{
2852 /* General inquiry access code (GIAC) */
2853 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2854 struct hci_cp_inquiry cp;
2855
2856 BT_DBG("%s", hdev->name);
2857
2858 if (test_bit(HCI_INQUIRY, &hdev->flags))
2859 return -EINPROGRESS;
2860
Johan Hedberg46632622012-01-02 16:06:08 +02002861 inquiry_cache_flush(hdev);
2862
Andre Guedes2519a1f2011-11-07 11:45:24 -03002863 memset(&cp, 0, sizeof(cp));
2864 memcpy(&cp.lap, lap, sizeof(cp.lap));
2865 cp.length = length;
2866
2867 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2868}
Andre Guedes023d50492011-11-04 14:16:52 -03002869
2870int hci_cancel_inquiry(struct hci_dev *hdev)
2871{
2872 BT_DBG("%s", hdev->name);
2873
2874 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002875 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002876
2877 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2878}
Andre Guedes31f79562012-04-24 21:02:53 -03002879
2880u8 bdaddr_to_le(u8 bdaddr_type)
2881{
2882 switch (bdaddr_type) {
2883 case BDADDR_LE_PUBLIC:
2884 return ADDR_LE_DEV_PUBLIC;
2885
2886 default:
2887 /* Fallback to LE Random address type */
2888 return ADDR_LE_DEV_RANDOM;
2889 }
2890}