blob: 979556a2649af9867881c492595462b0ed9dfc6b [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Johan Hedbergab81cbf2010-12-15 13:53:18 +020036#define AUTO_OFF_TIMEOUT 2000
37
Marcel Holtmannb78752c2010-08-08 23:06:53 -040038static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020039static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020040static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
Linus Torvalds1da177e2005-04-16 15:20:36 -070042/* HCI device list */
43LIST_HEAD(hci_dev_list);
44DEFINE_RWLOCK(hci_dev_list_lock);
45
46/* HCI callback list */
47LIST_HEAD(hci_cb_list);
48DEFINE_RWLOCK(hci_cb_list_lock);
49
Sasha Levin3df92b32012-05-27 22:36:56 +020050/* HCI ID Numbering */
51static DEFINE_IDA(hci_index_ida);
52
Linus Torvalds1da177e2005-04-16 15:20:36 -070053/* ---- HCI notifications ---- */
54
/* Forward an HCI device event (e.g. HCI_DEV_UP / HCI_DEV_DOWN) to the
 * HCI socket layer so monitoring sockets are informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
59
60/* ---- HCI requests ---- */
61
/* Called from the event path when a command completes. Wakes up any
 * synchronous waiter parked in __hci_request(). During the HCI_INIT
 * phase, completions that do not match the last issued init command are
 * filtered out, with a workaround for controllers that emit spurious
 * reset-complete events.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only the spurious-reset case (reset completed but the
		 * pending command was not a reset) needs the resend below.
		 */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Requeue a clone of the last sent command at the head of
		 * the command queue and kick the command worker.
		 */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Complete the pending synchronous request, if any */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
99
100static void hci_req_cancel(struct hci_dev *hdev, int err)
101{
102 BT_DBG("%s err 0x%2.2x", hdev->name, err);
103
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = err;
106 hdev->req_status = HCI_REQ_CANCELED;
107 wake_up_interruptible(&hdev->req_wait_q);
108 }
109}
110
111/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * Issues the request callback and sleeps (interruptibly, up to @timeout
 * jiffies) until hci_req_complete()/hci_req_cancel() updates
 * hdev->req_status and wakes us. Callers serialize requests through
 * hci_req_lock() — see hci_request().
 *
 * Returns 0 on success, a negative errno translated from the HCI
 * result, -EINTR if interrupted by a signal, or -ETIMEDOUT.
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue before firing the request so the
	 * wakeup cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on -EINTR req_status is left as-is here —
	 * presumably reset by a later request; verify with callers.
	 */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Translate the HCI status code into a negative errno */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno set by hci_req_cancel() */
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the timeout expired */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
154
/* Serialized wrapper around __hci_request(): takes the per-device
 * request lock for the duration of the request.
 *
 * Returns -ENETDOWN if the device is not up, otherwise the result of
 * __hci_request().
 */
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
171
/* Request callback: issue an HCI Reset. HCI_RESET is set so the event
 * path knows a reset is in flight; @opt is unused.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
180
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200181static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700182{
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200183 struct hci_cp_delete_stored_link_key cp;
Marcel Holtmann1ebb9252005-11-08 09:57:21 -0800184 __le16 param;
Marcel Holtmann89f27832007-09-09 08:39:49 +0200185 __u8 flt_type;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700186
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200187 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
188
Linus Torvalds1da177e2005-04-16 15:20:36 -0700189 /* Mandatory initialization */
190
191 /* Reset */
Szymon Janca6c511c2012-05-23 12:35:46 +0200192 if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200193 set_bit(HCI_RESET, &hdev->flags);
194 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300195 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700196
197 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700199
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200200 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200201 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200202
Linus Torvalds1da177e2005-04-16 15:20:36 -0700203 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200204 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700205
Linus Torvalds1da177e2005-04-16 15:20:36 -0700206 /* Read BD Address */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200207 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
208
209 /* Read Class of Device */
210 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
211
212 /* Read Local Name */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700214
215 /* Read Voice Setting */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200216 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700217
218 /* Optional initialization */
219
220 /* Clear Event Filters */
Marcel Holtmann89f27832007-09-09 08:39:49 +0200221 flt_type = HCI_FLT_CLEAR_ALL;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200222 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700223
Linus Torvalds1da177e2005-04-16 15:20:36 -0700224 /* Connection accept timeout ~20 secs */
Andrei Emeltchenko82781e62012-05-25 11:38:27 +0300225 param = __constant_cpu_to_le16(0x7d00);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200226 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
Johan Hedbergb0916ea2011-01-10 13:44:55 +0200227
228 bacpy(&cp.bdaddr, BDADDR_ANY);
229 cp.delete_all = 1;
230 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700231}
232
/* Queue the init command sequence for an AMP (alternate MAC/PHY)
 * controller. AMP controllers use block-based flow control.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Reset */
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
246
247static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
248{
249 struct sk_buff *skb;
250
251 BT_DBG("%s %ld", hdev->name, opt);
252
253 /* Driver initialization */
254
255 /* Special commands */
256 while ((skb = skb_dequeue(&hdev->driver_init))) {
257 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
258 skb->dev = (void *) hdev;
259
260 skb_queue_tail(&hdev->cmd_q, skb);
261 queue_work(hdev->workqueue, &hdev->cmd_work);
262 }
263 skb_queue_purge(&hdev->driver_init);
264
265 switch (hdev->dev_type) {
266 case HCI_BREDR:
267 bredr_init(hdev);
268 break;
269
270 case HCI_AMP:
271 amp_init(hdev);
272 break;
273
274 default:
275 BT_ERR("Unknown device type %d", hdev->dev_type);
276 break;
277 }
278
279}
280
/* Request callback: LE-specific initialization; @opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
288
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
290{
291 __u8 scan = opt;
292
293 BT_DBG("%s %x", hdev->name, scan);
294
295 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200296 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700297}
298
299static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
300{
301 __u8 auth = opt;
302
303 BT_DBG("%s %x", hdev->name, auth);
304
305 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200306 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700307}
308
309static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
310{
311 __u8 encrypt = opt;
312
313 BT_DBG("%s %x", hdev->name, encrypt);
314
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200315 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200316 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317}
318
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200319static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
320{
321 __le16 policy = cpu_to_le16(opt);
322
Marcel Holtmanna418b892008-11-30 12:17:28 +0100323 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200324
325 /* Default link policy */
326 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
327}
328
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900329/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700330 * Device is held on return. */
331struct hci_dev *hci_dev_get(int index)
332{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200333 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700334
335 BT_DBG("%d", index);
336
337 if (index < 0)
338 return NULL;
339
340 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200341 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700342 if (d->id == index) {
343 hdev = hci_dev_hold(d);
344 break;
345 }
346 }
347 read_unlock(&hci_dev_list_lock);
348 return hdev;
349}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350
351/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200352
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200353bool hci_discovery_active(struct hci_dev *hdev)
354{
355 struct discovery_state *discov = &hdev->discovery;
356
Andre Guedes6fbe1952012-02-03 17:47:58 -0300357 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300358 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300359 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200360 return true;
361
Andre Guedes6fbe1952012-02-03 17:47:58 -0300362 default:
363 return false;
364 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200365}
366
/* Transition the discovery state machine and emit the corresponding
 * mgmt "discovering" events. No-op when the state does not change.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Only report "stopped" if discovery got past STARTING,
		 * i.e. a "discovering" event was actually emitted.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
392
Linus Torvalds1da177e2005-04-16 15:20:36 -0700393static void inquiry_cache_flush(struct hci_dev *hdev)
394{
Johan Hedberg30883512012-01-04 14:16:21 +0200395 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200396 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700397
Johan Hedberg561aafb2012-01-04 13:31:59 +0200398 list_for_each_entry_safe(p, n, &cache->all, all) {
399 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200400 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700401 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200402
403 INIT_LIST_HEAD(&cache->unknown);
404 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700405}
406
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300407struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
408 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409{
Johan Hedberg30883512012-01-04 14:16:21 +0200410 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411 struct inquiry_entry *e;
412
413 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
414
Johan Hedberg561aafb2012-01-04 13:31:59 +0200415 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700416 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200417 return e;
418 }
419
420 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700421}
422
Johan Hedberg561aafb2012-01-04 13:31:59 +0200423struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300424 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200425{
Johan Hedberg30883512012-01-04 14:16:21 +0200426 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200427 struct inquiry_entry *e;
428
429 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
430
431 list_for_each_entry(e, &cache->unknown, list) {
432 if (!bacmp(&e->data.bdaddr, bdaddr))
433 return e;
434 }
435
436 return NULL;
437}
438
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200439struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300440 bdaddr_t *bdaddr,
441 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200442{
443 struct discovery_state *cache = &hdev->discovery;
444 struct inquiry_entry *e;
445
446 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
447
448 list_for_each_entry(e, &cache->resolve, list) {
449 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
450 return e;
451 if (!bacmp(&e->data.bdaddr, bdaddr))
452 return e;
453 }
454
455 return NULL;
456}
457
/* Re-insert @ie into the name-resolve list at its sorted position:
 * entries with a resolution already pending stay in front, the rest
 * are ordered by signal strength (smaller abs(rssi) first).
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Advance @pos past NAME_PENDING entries and entries with a
	 * stronger signal than @ie; insert right after the last one.
	 */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
476
/* Insert or refresh the cache entry for an inquiry result.
 *
 * @name_known: caller already knows the remote name for this result.
 * @ssp: out parameter, set when the result indicates SSP support
 *	 (from this result or a previously cached one); may be NULL.
 *
 * Returns true if the entry's name is known (no name request needed),
 * false if the name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support remembered from an earlier result also counts */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* An RSSI change may alter this entry's position in the
		 * strength-ordered resolve list.
		 */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known: promote the entry and drop it from the
	 * unknown/resolve list it was queued on.
	 */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
532
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info. Returns the number of entries copied. Must not
 * sleep — callers hold the device lock (see hci_inquiry()).
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
560
561static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
562{
563 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
564 struct hci_cp_inquiry cp;
565
566 BT_DBG("%s", hdev->name);
567
568 if (test_bit(HCI_INQUIRY, &hdev->flags))
569 return;
570
571 /* Start Inquiry */
572 memcpy(&cp.lap, &ir->lap, 3);
573 cp.length = ir->length;
574 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200575 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700576}
577
/* ioctl helper: run (or reuse the cache of) an inquiry and copy the
 * results back to user space. @arg points to a struct hci_inquiry_req
 * followed by space for the result array.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV,
 * -ENOMEM, or a request error from hci_request()).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only start a fresh inquiry when the cache is stale, empty, or
	 * the caller explicitly asked for a flush.
	 */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the updated request header, then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
644
645/* ---- HCI ioctl helpers ---- */
646
/* ioctl helper: bring up the HCI device with index @dev.
 *
 * Opens the transport, runs the HCI init sequence (unless the device
 * is in raw mode), and on success marks the device HCI_UP, notifies
 * listeners and informs the mgmt layer. On init failure the transport
 * is fully torn down again.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init error from __hci_request()).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse devices that are being unregistered */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Refuse while rfkill blocks the radio */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	 * enable_hs is not set
	 */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the underlying transport */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* Additional LE init when the host supports LE */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Don't report "powered" while still in controller setup */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
738
/* Shut down an HCI device: cancel pending work and requests, flush
 * queues and caches, optionally reset the controller, close the
 * transport and inform the mgmt layer. Safe to call when the device is
 * already down (returns 0 early). Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	/* Abort any in-flight synchronous request before taking the lock */
	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	/* Cancel a pending discoverable-timeout and clear the flag */
	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
			      msecs_to_jiffies(250));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Only report power-off to mgmt if this wasn't an auto-off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
826
827int hci_dev_close(__u16 dev)
828{
829 struct hci_dev *hdev;
830 int err;
831
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200832 hdev = hci_dev_get(dev);
833 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700834 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100835
836 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
837 cancel_delayed_work(&hdev->power_off);
838
Linus Torvalds1da177e2005-04-16 15:20:36 -0700839 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100840
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 hci_dev_put(hdev);
842 return err;
843}
844
/* Soft-reset the HCI device with the given index: drop queued RX and
 * command traffic, flush the inquiry cache and the connection hash,
 * and - unless the device is in raw mode - issue an HCI Reset to the
 * controller.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other synchronous HCI requests */
	hci_req_lock(hdev);

	/* Nothing to reset if the device is down; ret stays 0 */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Give the driver a chance to flush its own queues */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Re-open the command window and clear all flow-control counts */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
				    msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
883
884int hci_dev_reset_stat(__u16 dev)
885{
886 struct hci_dev *hdev;
887 int ret = 0;
888
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200889 hdev = hci_dev_get(dev);
890 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 return -ENODEV;
892
893 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
894
895 hci_dev_put(hdev);
896
897 return ret;
898}
899
/* Handle per-device control ioctls (HCISETAUTH, HCISETSCAN, ...).
 *
 * Copies a struct hci_dev_req from user space, resolves the target
 * device and applies the requested setting - either by running a
 * synchronous HCI request or by updating hdev fields directly.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt carries two packed 16-bit values: the first
		 * half is the packet count, the second half the MTU */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
974
/* Handle the HCIGETDEVLIST ioctl: report the id and flags of up to
 * dev_num registered HCI devices back to user space.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the kzalloc() below to a sane size */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Enumerating the devices cancels pending auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* NOTE(review): devices not controlled through mgmt are
		 * forced pairable here, presumably for legacy userspace -
		 * confirm against hci_sock/mgmt callers */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries that were actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1021
/* Handle the HCIGETDEVINFO ioctl: fill a struct hci_dev_info for the
 * requested device and copy it back to user space.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device cancels a pending auto power-off; the
	 * _sync variant waits in case the work is already running */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* NOTE(review): non-mgmt devices are forced pairable here,
	 * presumably for legacy userspace - confirm */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	/* NOTE(review): strcpy assumes hdev->name always fits in
	 * di.name - verify both field sizes in the UAPI header */
	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Low nibble: transport bus; high nibble: device type */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1063
1064/* ---- Interface to HCI drivers ---- */
1065
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001066static int hci_rfkill_set_block(void *data, bool blocked)
1067{
1068 struct hci_dev *hdev = data;
1069
1070 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1071
1072 if (!blocked)
1073 return 0;
1074
1075 hci_dev_do_close(hdev);
1076
1077 return 0;
1078}
1079
/* Only .set_block is provided; see hci_rfkill_set_block() above. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1083
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001084static void hci_power_on(struct work_struct *work)
1085{
1086 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1087
1088 BT_DBG("%s", hdev->name);
1089
1090 if (hci_dev_open(hdev->id) < 0)
1091 return;
1092
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001093 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Gustavo F. Padovan80b7ab32011-12-17 14:52:27 -02001094 schedule_delayed_work(&hdev->power_off,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001095 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001096
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001097 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001098 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001099}
1100
/* Deferred power-off work: simply close the device. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1110
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001111static void hci_discov_off(struct work_struct *work)
1112{
1113 struct hci_dev *hdev;
1114 u8 scan = SCAN_PAGE;
1115
1116 hdev = container_of(work, struct hci_dev, discov_off.work);
1117
1118 BT_DBG("%s", hdev->name);
1119
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001120 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001121
1122 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1123
1124 hdev->discov_timeout = 0;
1125
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001126 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001127}
1128
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001129int hci_uuids_clear(struct hci_dev *hdev)
1130{
1131 struct list_head *p, *n;
1132
1133 list_for_each_safe(p, n, &hdev->uuids) {
1134 struct bt_uuid *uuid;
1135
1136 uuid = list_entry(p, struct bt_uuid, list);
1137
1138 list_del(p);
1139 kfree(uuid);
1140 }
1141
1142 return 0;
1143}
1144
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001145int hci_link_keys_clear(struct hci_dev *hdev)
1146{
1147 struct list_head *p, *n;
1148
1149 list_for_each_safe(p, n, &hdev->link_keys) {
1150 struct link_key *key;
1151
1152 key = list_entry(p, struct link_key, list);
1153
1154 list_del(p);
1155 kfree(key);
1156 }
1157
1158 return 0;
1159}
1160
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001161int hci_smp_ltks_clear(struct hci_dev *hdev)
1162{
1163 struct smp_ltk *k, *tmp;
1164
1165 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1166 list_del(&k->list);
1167 kfree(k);
1168 }
1169
1170 return 0;
1171}
1172
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001173struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1174{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001175 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001176
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001177 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001178 if (bacmp(bdaddr, &k->bdaddr) == 0)
1179 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001180
1181 return NULL;
1182}
1183
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301184static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001185 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001186{
1187 /* Legacy key */
1188 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301189 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001190
1191 /* Debug keys are insecure so don't store them persistently */
1192 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301193 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001194
1195 /* Changed combination key and there's no previous one */
1196 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301197 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001198
1199 /* Security mode 3 case */
1200 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301201 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001202
1203 /* Neither local nor remote side had no-bonding as requirement */
1204 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301205 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001206
1207 /* Local side had dedicated bonding as requirement */
1208 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301209 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001210
1211 /* Remote side had dedicated bonding as requirement */
1212 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301213 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001214
1215 /* If none of the above criteria match, then don't store the key
1216 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301217 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001218}
1219
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001220struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001221{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001222 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001223
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001224 list_for_each_entry(k, &hdev->long_term_keys, list) {
1225 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001226 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001227 continue;
1228
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001229 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001230 }
1231
1232 return NULL;
1233}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001234
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001235struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001236 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001237{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001238 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001239
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001240 list_for_each_entry(k, &hdev->long_term_keys, list)
1241 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001242 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001243 return k;
1244
1245 return NULL;
1246}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001247
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001248int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001249 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001250{
1251 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301252 u8 old_key_type;
1253 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001254
1255 old_key = hci_find_link_key(hdev, bdaddr);
1256 if (old_key) {
1257 old_key_type = old_key->type;
1258 key = old_key;
1259 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001260 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001261 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1262 if (!key)
1263 return -ENOMEM;
1264 list_add(&key->list, &hdev->link_keys);
1265 }
1266
1267 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1268
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001269 /* Some buggy controller combinations generate a changed
1270 * combination key for legacy pairing even when there's no
1271 * previous key */
1272 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001273 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001274 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001275 if (conn)
1276 conn->key_type = type;
1277 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001278
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001279 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001280 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001281 key->pin_len = pin_len;
1282
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001283 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001284 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001285 else
1286 key->type = type;
1287
Johan Hedberg4df378a2011-04-28 11:29:03 -07001288 if (!new_key)
1289 return 0;
1290
1291 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1292
Johan Hedberg744cf192011-11-08 20:40:14 +02001293 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001294
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301295 if (conn)
1296 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001297
1298 return 0;
1299}
1300
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001301int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001302 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001303 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001304{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001305 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001306
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001307 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1308 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001309
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001310 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1311 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001312 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001313 else {
1314 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001315 if (!key)
1316 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001317 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001318 }
1319
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001320 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001321 key->bdaddr_type = addr_type;
1322 memcpy(key->val, tk, sizeof(key->val));
1323 key->authenticated = authenticated;
1324 key->ediv = ediv;
1325 key->enc_size = enc_size;
1326 key->type = type;
1327 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001328
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001329 if (!new_key)
1330 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001331
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001332 if (type & HCI_SMP_LTK)
1333 mgmt_new_ltk(hdev, key, 1);
1334
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001335 return 0;
1336}
1337
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001338int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1339{
1340 struct link_key *key;
1341
1342 key = hci_find_link_key(hdev, bdaddr);
1343 if (!key)
1344 return -ENOENT;
1345
1346 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1347
1348 list_del(&key->list);
1349 kfree(key);
1350
1351 return 0;
1352}
1353
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001354int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1355{
1356 struct smp_ltk *k, *tmp;
1357
1358 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1359 if (bacmp(bdaddr, &k->bdaddr))
1360 continue;
1361
1362 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1363
1364 list_del(&k->list);
1365 kfree(k);
1366 }
1367
1368 return 0;
1369}
1370
Ville Tervo6bd32322011-02-16 16:32:41 +02001371/* HCI command timer function */
1372static void hci_cmd_timer(unsigned long arg)
1373{
1374 struct hci_dev *hdev = (void *) arg;
1375
1376 BT_ERR("%s command tx timeout", hdev->name);
1377 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001378 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001379}
1380
Szymon Janc2763eda2011-03-22 13:12:22 +01001381struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001382 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001383{
1384 struct oob_data *data;
1385
1386 list_for_each_entry(data, &hdev->remote_oob_data, list)
1387 if (bacmp(bdaddr, &data->bdaddr) == 0)
1388 return data;
1389
1390 return NULL;
1391}
1392
1393int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1394{
1395 struct oob_data *data;
1396
1397 data = hci_find_remote_oob_data(hdev, bdaddr);
1398 if (!data)
1399 return -ENOENT;
1400
1401 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1402
1403 list_del(&data->list);
1404 kfree(data);
1405
1406 return 0;
1407}
1408
1409int hci_remote_oob_data_clear(struct hci_dev *hdev)
1410{
1411 struct oob_data *data, *n;
1412
1413 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1414 list_del(&data->list);
1415 kfree(data);
1416 }
1417
1418 return 0;
1419}
1420
1421int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001422 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001423{
1424 struct oob_data *data;
1425
1426 data = hci_find_remote_oob_data(hdev, bdaddr);
1427
1428 if (!data) {
1429 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1430 if (!data)
1431 return -ENOMEM;
1432
1433 bacpy(&data->bdaddr, bdaddr);
1434 list_add(&data->list, &hdev->remote_oob_data);
1435 }
1436
1437 memcpy(data->hash, hash, sizeof(data->hash));
1438 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1439
1440 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1441
1442 return 0;
1443}
1444
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001445struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001446{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001447 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001448
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001449 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001450 if (bacmp(bdaddr, &b->bdaddr) == 0)
1451 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001452
1453 return NULL;
1454}
1455
1456int hci_blacklist_clear(struct hci_dev *hdev)
1457{
1458 struct list_head *p, *n;
1459
1460 list_for_each_safe(p, n, &hdev->blacklist) {
1461 struct bdaddr_list *b;
1462
1463 b = list_entry(p, struct bdaddr_list, list);
1464
1465 list_del(p);
1466 kfree(b);
1467 }
1468
1469 return 0;
1470}
1471
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001472int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001473{
1474 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001475
1476 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1477 return -EBADF;
1478
Antti Julku5e762442011-08-25 16:48:02 +03001479 if (hci_blacklist_lookup(hdev, bdaddr))
1480 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001481
1482 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001483 if (!entry)
1484 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001485
1486 bacpy(&entry->bdaddr, bdaddr);
1487
1488 list_add(&entry->list, &hdev->blacklist);
1489
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001490 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001491}
1492
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001493int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001494{
1495 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001496
Szymon Janc1ec918c2011-11-16 09:32:21 +01001497 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001498 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001499
1500 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001501 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001502 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001503
1504 list_del(&entry->list);
1505 kfree(entry);
1506
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001507 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001508}
1509
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001510static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1511{
1512 struct le_scan_params *param = (struct le_scan_params *) opt;
1513 struct hci_cp_le_set_scan_param cp;
1514
1515 memset(&cp, 0, sizeof(cp));
1516 cp.type = param->type;
1517 cp.interval = cpu_to_le16(param->interval);
1518 cp.window = cpu_to_le16(param->window);
1519
1520 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1521}
1522
1523static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1524{
1525 struct hci_cp_le_set_scan_enable cp;
1526
1527 memset(&cp, 0, sizeof(cp));
1528 cp.enable = 1;
1529
1530 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1531}
1532
/* Start an LE scan synchronously: program the scan parameters, enable
 * scanning, and schedule the delayed work that turns scanning off
 * again after @timeout milliseconds.
 *
 * Returns 0 on success, -EINPROGRESS if a scan is already active, or
 * a negative errno from the underlying HCI requests.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* Per-command completion timeout, not the overall scan length */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Parameters must be programmed before scanning is enabled */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1566
Andre Guedes7dbfac12012-03-15 16:52:07 -03001567int hci_cancel_le_scan(struct hci_dev *hdev)
1568{
1569 BT_DBG("%s", hdev->name);
1570
1571 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1572 return -EALREADY;
1573
1574 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1575 struct hci_cp_le_set_scan_enable cp;
1576
1577 /* Send HCI command to disable LE Scan */
1578 memset(&cp, 0, sizeof(cp));
1579 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1580 }
1581
1582 return 0;
1583}
1584
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001585static void le_scan_disable_work(struct work_struct *work)
1586{
1587 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001588 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001589 struct hci_cp_le_set_scan_enable cp;
1590
1591 BT_DBG("%s", hdev->name);
1592
1593 memset(&cp, 0, sizeof(cp));
1594
1595 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1596}
1597
Andre Guedes28b75a82012-02-03 17:48:00 -03001598static void le_scan_work(struct work_struct *work)
1599{
1600 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1601 struct le_scan_params *param = &hdev->le_scan_params;
1602
1603 BT_DBG("%s", hdev->name);
1604
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001605 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1606 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001607}
1608
/* Kick off an LE scan asynchronously.
 *
 * The scan parameters are saved in hdev->le_scan_params and the blocking
 * scan itself runs from le_scan_work on system_long_wq (it can sleep for
 * the whole scan duration, hence the long workqueue).
 *
 * Returns -EINPROGRESS if a scan work item is already queued/running,
 * 0 on success.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	/* NOTE(review): work_busy() is only a snapshot; a scan finishing
	 * concurrently could race with this check — presumed acceptable
	 * here, callers retry. TODO confirm. */
	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1628
/* Alloc HCI device */
/* Allocate and initialise a new hci_dev structure.
 *
 * Sets BR/EDR defaults (packet types, link mode, io capability, sniff
 * intervals), initialises all locks, lists, work items, skb queues and
 * the command timer. The caller later registers the device with
 * hci_register_dev() and releases it with hci_free_dev().
 *
 * Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR capabilities every controller supports */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	/* Sniff interval defaults, in baseband slots (0.625 ms units) —
	 * TODO confirm units against users of these fields */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1682
/* Free HCI device */
/* Drop the final device reference. The hci_dev memory itself is freed
 * from the struct device release callback once all refs are gone, so
 * only the driver_init skb queue is purged here explicitly.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1692
/* Register HCI device */
/* Register an allocated hci_dev with the HCI core.
 *
 * Allocates a device index (AMP controllers never get index 0 so the
 * index doubles as the AMP controller ID), links the device into the
 * global list, creates its per-device workqueue, adds sysfs entries,
 * optionally registers an rfkill switch, and schedules the initial
 * power-on.
 *
 * Returns the assigned id (>= 0) on success or a negative errno;
 * on error all intermediate state is unwound via the goto chain.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A transport driver must provide at least open/close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue for this device's rx/tx/cmd
	 * work; WQ_MEM_RECLAIM because it may be needed to make progress
	 * under memory pressure. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: failure to register it is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
	set_bit(HCI_SETUP, &hdev->dev_flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1768
/* Unregister HCI device */
/* Tear down a registered hci_dev: unlink it from the global list, close
 * it, notify mgmt userspace (unless still in init/setup), remove rfkill
 * and sysfs entries, destroy the workqueue, clear all per-device state
 * lists, drop the core's reference and finally release the index.
 *
 * The id is cached before teardown because hci_dev_put() may free hdev.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Signal in-flight paths that the device is going away */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	/* Release the index last; hdev may already be freed above */
	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1824
/* Suspend HCI device */
/* Broadcast a suspend notification for this device. Always succeeds. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1832
/* Resume HCI device */
/* Broadcast a resume notification for this device. Always succeeds. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1840
/* Receive frame from HCI drivers */
/* Entry point for transport drivers handing a complete frame to the core.
 * The driver must have set skb->dev to the hci_dev. The frame is dropped
 * (-ENXIO) unless the device is up or initialising; otherwise it is
 * timestamped, queued on rx_q and processed asynchronously by rx_work.
 * Ownership of the skb passes to the core in all cases.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1863
/* Reassemble a stream of byte chunks into complete HCI frames.
 *
 * @type:  packet type (HCI_ACLDATA_PKT..HCI_EVENT_PKT, validated here)
 * @data:  next chunk of raw bytes from the driver
 * @count: number of bytes in @data
 * @index: reassembly slot in hdev->reassembly[] to use
 *
 * State machine: a partial frame lives in hdev->reassembly[index]; its
 * bt_skb_cb.expect tracks how many more bytes are needed. First the
 * header is collected, then 'expect' is refined from the header's length
 * field, then the payload. A completed frame is handed to
 * hci_recv_frame() and the slot cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), or a negative
 * errno (-EILSEQ on bad type/index, -ENOMEM on allocation failure or if
 * the advertised payload would overflow the preallocated skb).
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate the largest possible skb
		 * for this packet type up front. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;	/* first collect the header */
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once a full header is in, learn the payload length */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1972
/* Feed driver data of a known packet type into the reassembler.
 * Uses per-type reassembly slots (type - 1, since valid types start at
 * HCI_ACLDATA_PKT). Loops until all bytes are consumed; returns the
 * final leftover count from hci_reassembly() (0 when fully consumed)
 * or a negative errno.
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1992
/* Dedicated reassembly slot for self-describing byte streams (e.g. UART
 * transports) where the packet type is the first byte of each frame. */
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream into the reassembler. If no frame is in
 * progress, the first byte is taken as the HCI packet type; otherwise
 * the type of the in-progress frame is reused. Returns leftover byte
 * count (0 when fully consumed) or a negative errno from
 * hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2027
Linus Torvalds1da177e2005-04-16 15:20:36 -07002028/* ---- Interface to upper protocols ---- */
2029
Linus Torvalds1da177e2005-04-16 15:20:36 -07002030int hci_register_cb(struct hci_cb *cb)
2031{
2032 BT_DBG("%p name %s", cb, cb->name);
2033
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002034 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002036 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037
2038 return 0;
2039}
2040EXPORT_SYMBOL(hci_register_cb);
2041
/* Remove a previously registered upper-protocol callback structure from
 * the global hci_cb list. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2053
/* Hand a fully formed outgoing frame to the transport driver.
 * Timestamps the skb, mirrors a copy to the HCI monitor and (in promisc
 * mode) to raw sockets, drops socket ownership, then calls the driver's
 * send() callback. Consumes the skb; returns the driver's result or
 * -ENODEV if skb->dev was never set.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2081
/* Send HCI command */
/* Build an HCI command packet (little-endian opcode header plus @plen
 * bytes of @param) and queue it on cmd_q for asynchronous transmission
 * by cmd_work. During device init the opcode is recorded in
 * init_last_cmd so the init state machine can match replies.
 * Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117
/* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the last command sent
 * (hdev->sent_cmd), or NULL if there is none or its opcode does not
 * match @opcode. The pointer aliases sent_cmd's data — valid only while
 * sent_cmd is held.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2135
/* Send ACL data */
/* Prepend an ACL header to @skb: handle+flags packed little-endian and
 * the pre-push payload length in dlen. The header is pushed into
 * existing headroom, so the caller must have reserved
 * HCI_ACL_HDR_SIZE bytes. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;	/* payload length before the push */

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2148
/* Queue an (optionally fragmented) ACL packet on @queue.
 *
 * The head skb gets an ACL header with the caller's @flags; each
 * fragment on its frag_list is detached, given its own ACL header with
 * ACL_START replaced by ACL_CONT, and appended. All fragments are
 * enqueued atomically under the queue lock so the scheduler never sees
 * a partial packet.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* The head skb carries only its linear data; fragments travel
	 * individually via frag_list below. */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must not carry ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2195
/* Queue ACL data on a channel's data queue and kick the TX work item.
 * Actual transmission is done asynchronously by hci_tx_work. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
/* Send SCO data */
/* Prepend a SCO header (handle + payload length) and queue the packet
 * on the connection's data queue for the TX work item.
 * NOTE(review): hdr.dlen is assigned from skb->len without a range
 * check — presumably callers guarantee SCO payloads fit the header's
 * length field / sco_mtu; verify at the call sites.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231
2232/* ---- HCI TX task (outgoing data) ---- */
2233
2234/* HCI Connection scheduler */
/* Pick the connection of @type with the fewest in-flight packets
 * (fair scheduling) and compute its transmit quota.
 *
 * The connection hash is traversed under rcu_read_lock(); the
 * hci_conn_num() comparison lets the walk stop early once all
 * connections of this type have been seen.
 *
 * *quote is set to the number of packets this connection may send now
 * (available controller buffers divided evenly, minimum 1), or 0 when
 * nothing is eligible. Returns the chosen connection or NULL.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* The conn list is walked under RCU below, so no device lock is
	 * taken here. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer-credit pool matching the link type;
		 * LE shares ACL buffers when the controller reports no
		 * dedicated LE buffers (le_mtu == 0). */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2294
/* Link transmit timeout: the controller stopped returning buffer
 * credits for links of @type. Disconnect every connection of that type
 * that still has unacknowledged packets (reason 0x13 = remote user
 * terminated connection), walking the hash under RCU. */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2315
/* Channel-level scheduler: pick the next hci_chan of link @type to
 * service, honouring skb priority first and fairness second.
 *
 * Among all channels with queued data, only those whose head skb has
 * the highest priority seen so far compete; of these, the one whose
 * connection has the fewest in-flight packets wins. Traversal of both
 * the connection hash and each connection's chan_list is under RCU.
 *
 * *quote receives the packet budget (credits of the matching buffer
 * pool split evenly among competing channels, minimum 1). Returns the
 * chosen channel or NULL if nothing is ready.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level. */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Same credit-pool selection as hci_low_sent() */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2394
/* Anti-starvation pass run after a scheduling round: for every channel
 * of link @type that sent nothing this round (chan->sent == 0) but has
 * data queued, promote its head skb to priority HCI_PRIO_MAX - 1 so it
 * will compete at the top level next round. Channels that did send have
 * their per-round counter reset instead. Traversal is under RCU.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service this round: reset counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2444
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002445static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2446{
2447 /* Calculate count of blocks used by this packet */
2448 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2449}
2450
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002451static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002452{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002453 if (!test_bit(HCI_RAW, &hdev->flags)) {
2454 /* ACL tx timeout must be longer than maximum
2455 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002456 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002457 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002458 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002459 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002460}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002461
/* Send queued ACL data on controllers using packet-based flow control.
 *
 * Repeatedly asks hci_chan_sent() for the most deserving channel and
 * drains it until its quote, the controller credit (acl_cnt), or a drop
 * in skb priority stops the inner loop.  If anything was sent, starved
 * channels are re-promoted afterwards.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;	/* snapshot to detect progress */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the first packet fixes the bar for this run:
		 * the queue is priority-ordered, so a lower value means we
		 * are done with this channel for now. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			/* Leave sniff mode before pushing data. */
			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something went out: rebalance channels that got nothing. */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2499
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002500static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002501{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002502 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002503 struct hci_chan *chan;
2504 struct sk_buff *skb;
2505 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002506
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002507 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002508
2509 while (hdev->block_cnt > 0 &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002510 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002511 u32 priority = (skb_peek(&chan->data_q))->priority;
2512 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2513 int blocks;
2514
2515 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002516 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002517
2518 /* Stop if priority has changed */
2519 if (skb->priority < priority)
2520 break;
2521
2522 skb = skb_dequeue(&chan->data_q);
2523
2524 blocks = __get_blocks(hdev, skb);
2525 if (blocks > hdev->block_cnt)
2526 return;
2527
2528 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002529 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002530
2531 hci_send_frame(skb);
2532 hdev->acl_last_tx = jiffies;
2533
2534 hdev->block_cnt -= blocks;
2535 quote -= blocks;
2536
2537 chan->sent += blocks;
2538 chan->conn->sent += blocks;
2539 }
2540 }
2541
2542 if (cnt != hdev->block_cnt)
2543 hci_prio_recalculate(hdev, ACL_LINK);
2544}
2545
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002546static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002547{
2548 BT_DBG("%s", hdev->name);
2549
2550 if (!hci_conn_num(hdev, ACL_LINK))
2551 return;
2552
2553 switch (hdev->flow_ctl_mode) {
2554 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2555 hci_sched_acl_pkt(hdev);
2556 break;
2557
2558 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2559 hci_sched_acl_blk(hdev);
2560 break;
2561 }
2562}
2563
Linus Torvalds1da177e2005-04-16 15:20:36 -07002564/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002565static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566{
2567 struct hci_conn *conn;
2568 struct sk_buff *skb;
2569 int quote;
2570
2571 BT_DBG("%s", hdev->name);
2572
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002573 if (!hci_conn_num(hdev, SCO_LINK))
2574 return;
2575
Linus Torvalds1da177e2005-04-16 15:20:36 -07002576 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2577 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2578 BT_DBG("skb %p len %d", skb, skb->len);
2579 hci_send_frame(skb);
2580
2581 conn->sent++;
2582 if (conn->sent == ~0)
2583 conn->sent = 0;
2584 }
2585 }
2586}
2587
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002588static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002589{
2590 struct hci_conn *conn;
2591 struct sk_buff *skb;
2592 int quote;
2593
2594 BT_DBG("%s", hdev->name);
2595
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002596 if (!hci_conn_num(hdev, ESCO_LINK))
2597 return;
2598
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002599 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2600 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002601 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2602 BT_DBG("skb %p len %d", skb, skb->len);
2603 hci_send_frame(skb);
2604
2605 conn->sent++;
2606 if (conn->sent == ~0)
2607 conn->sent = 0;
2608 }
2609 }
2610}
2611
/* Send queued LE data.
 *
 * Controllers without a dedicated LE buffer pool (le_pkts == 0) borrow
 * credits from the ACL pool; the remaining credit is written back to
 * whichever pool it came from at the end.  Channel draining mirrors
 * hci_sched_acl_pkt().
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Pick the LE pool if the controller has one, else share ACL's. */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* starting credit, to detect whether anything was sent */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the unused credit to the pool it was taken from. */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2662
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002663static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002665 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002666 struct sk_buff *skb;
2667
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002668 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002669 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002670
2671 /* Schedule queues and send stuff to HCI driver */
2672
2673 hci_sched_acl(hdev);
2674
2675 hci_sched_sco(hdev);
2676
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002677 hci_sched_esco(hdev);
2678
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002679 hci_sched_le(hdev);
2680
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681 /* Send next queued raw (unknown type) packet */
2682 while ((skb = skb_dequeue(&hdev->raw_q)))
2683 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684}
2685
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002686/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687
2688/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002689static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690{
2691 struct hci_acl_hdr *hdr = (void *) skb->data;
2692 struct hci_conn *conn;
2693 __u16 handle, flags;
2694
2695 skb_pull(skb, HCI_ACL_HDR_SIZE);
2696
2697 handle = __le16_to_cpu(hdr->handle);
2698 flags = hci_flags(handle);
2699 handle = hci_handle(handle);
2700
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002701 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len,
2702 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703
2704 hdev->stat.acl_rx++;
2705
2706 hci_dev_lock(hdev);
2707 conn = hci_conn_hash_lookup_handle(hdev, handle);
2708 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002709
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002711 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002712
Johan Hedberg671267b2012-05-12 16:11:50 -03002713 hci_dev_lock(hdev);
2714 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2715 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2716 mgmt_device_connected(hdev, &conn->dst, conn->type,
2717 conn->dst_type, 0, NULL, 0,
2718 conn->dev_class);
2719 hci_dev_unlock(hdev);
2720
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002722 l2cap_recv_acldata(conn, skb, flags);
2723 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002725 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002726 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 }
2728
2729 kfree_skb(skb);
2730}
2731
2732/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002733static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002734{
2735 struct hci_sco_hdr *hdr = (void *) skb->data;
2736 struct hci_conn *conn;
2737 __u16 handle;
2738
2739 skb_pull(skb, HCI_SCO_HDR_SIZE);
2740
2741 handle = __le16_to_cpu(hdr->handle);
2742
2743 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2744
2745 hdev->stat.sco_rx++;
2746
2747 hci_dev_lock(hdev);
2748 conn = hci_conn_hash_lookup_handle(hdev, handle);
2749 hci_dev_unlock(hdev);
2750
2751 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002753 sco_recv_scodata(conn, skb);
2754 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002756 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002757 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 }
2759
2760 kfree_skb(skb);
2761}
2762
/* RX work handler: drain the receive queue one skb at a time.
 *
 * For each packet, in order: copy to the monitor socket, copy to promisc
 * raw sockets, then either drop it (HCI_RAW), suppress data packets
 * during init (HCI_INIT), or dispatch it by packet type to the event,
 * ACL, or SCO handler.  Unknown types are freed.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space owns the device; the stack must not
		 * process anything itself. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state; only
			 * events are meaningful while initializing. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2817
/* Command work handler: push the next queued HCI command to the driver.
 *
 * Only runs a command when the controller has a free command credit
 * (cmd_cnt).  A clone of the skb is kept in hdev->sent_cmd so the
 * completion event can be matched against it; if cloning fails the
 * command is put back at the head of the queue and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the clone of the previously sent command. */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* A reset may legitimately take long: don't arm the
			 * command timeout for it. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed: requeue and try again later. */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002848
2849int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2850{
2851 /* General inquiry access code (GIAC) */
2852 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2853 struct hci_cp_inquiry cp;
2854
2855 BT_DBG("%s", hdev->name);
2856
2857 if (test_bit(HCI_INQUIRY, &hdev->flags))
2858 return -EINPROGRESS;
2859
Johan Hedberg46632622012-01-02 16:06:08 +02002860 inquiry_cache_flush(hdev);
2861
Andre Guedes2519a1f2011-11-07 11:45:24 -03002862 memset(&cp, 0, sizeof(cp));
2863 memcpy(&cp.lap, lap, sizeof(cp.lap));
2864 cp.length = length;
2865
2866 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2867}
Andre Guedes023d50492011-11-04 14:16:52 -03002868
2869int hci_cancel_inquiry(struct hci_dev *hdev)
2870{
2871 BT_DBG("%s", hdev->name);
2872
2873 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002874 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002875
2876 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2877}
Andre Guedes31f79562012-04-24 21:02:53 -03002878
2879u8 bdaddr_to_le(u8 bdaddr_type)
2880{
2881 switch (bdaddr_type) {
2882 case BDADDR_LE_PUBLIC:
2883 return ADDR_LE_DEV_PUBLIC;
2884
2885 default:
2886 /* Fallback to LE Random address type */
2887 return ADDR_LE_DEV_RANDOM;
2888 }
2889}