blob: fa974a19d365e78031cc53977871a1e2b8df2016 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
Sasha Levin3df92b32012-05-27 22:36:56 +020048/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Forward a device state event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to the
 * HCI socket layer so monitoring sockets are informed. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Called from the event path when an HCI command completes. Wakes up a
 * synchronous __hci_request() waiter, and works around controllers that
 * misbehave during the init phase.
 * @cmd:    opcode of the command that completed
 * @result: status reported by the controller
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a clone of the last sent command at the head of
		 * the command queue so the init sequence can make progress. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Complete the pending synchronous request, if any. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 * Caller must hold hci_req_lock; @req queues the HCI command(s) and
 * hci_req_complete()/hci_req_cancel() wake us up when done.
 * Returns 0 on success, -EINTR when interrupted by a signal,
 * -ETIMEDOUT when the controller never answered, or a negative errno
 * derived from the controller status / cancel reason.
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue *before* issuing the request so the
	 * wake-up from the completion path cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: the request timed out */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
152
Gustavo Padovan6039aa732012-05-23 04:04:18 -0300153static int hci_request(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156{
157 int ret;
158
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 /* Serialize all requests */
163 hci_req_lock(hdev);
164 ret = __hci_request(hdev, req, opt, timeout);
165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
/* Request callback: issue an HCI Reset to the controller. The
 * HCI_RESET flag is set first so the event path knows a reset is in
 * flight. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
178
/* Queue the mandatory and optional init commands for a BR/EDR
 * controller. Runs under HCI_INIT; the replies are handled in the
 * event path and command pacing is done by the cmd work. */
static void bredr_init(struct hci_dev *hdev)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* BR/EDR controllers use packet based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Mandatory initialization */

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Drop all link keys stored on the controller itself */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
224
/* Queue the init commands for an AMP (alternate MAC/PHY) controller. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
}
235
/* Request callback run by __hci_request() during device bring-up:
 * flush any driver-queued vendor commands into the command queue,
 * optionally reset the controller, then run the transport specific
 * init sequence. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands the driver queued before the device was opened */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}

}
273
/* Request callback for LE specific init: query the LE buffer size. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
281
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
283{
284 __u8 scan = opt;
285
286 BT_DBG("%s %x", hdev->name, scan);
287
288 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200289 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700290}
291
292static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
293{
294 __u8 auth = opt;
295
296 BT_DBG("%s %x", hdev->name, auth);
297
298 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200299 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700300}
301
302static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
303{
304 __u8 encrypt = opt;
305
306 BT_DBG("%s %x", hdev->name, encrypt);
307
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200308 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200309 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310}
311
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200312static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
313{
314 __le16 policy = cpu_to_le16(opt);
315
Marcel Holtmanna418b892008-11-30 12:17:28 +0100316 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200317
318 /* Default link policy */
319 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
320}
321
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900322/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700323 * Device is held on return. */
324struct hci_dev *hci_dev_get(int index)
325{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200326 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327
328 BT_DBG("%d", index);
329
330 if (index < 0)
331 return NULL;
332
333 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200334 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700335 if (d->id == index) {
336 hdev = hci_dev_hold(d);
337 break;
338 }
339 }
340 read_unlock(&hci_dev_list_lock);
341 return hdev;
342}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700343
344/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200345
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200346bool hci_discovery_active(struct hci_dev *hdev)
347{
348 struct discovery_state *discov = &hdev->discovery;
349
Andre Guedes6fbe1952012-02-03 17:47:58 -0300350 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300351 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300352 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200353 return true;
354
Andre Guedes6fbe1952012-02-03 17:47:58 -0300355 default:
356 return false;
357 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200358}
359
Johan Hedbergff9ef572012-01-04 14:23:45 +0200360void hci_discovery_set_state(struct hci_dev *hdev, int state)
361{
362 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
363
364 if (hdev->discovery.state == state)
365 return;
366
367 switch (state) {
368 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300369 if (hdev->discovery.state != DISCOVERY_STARTING)
370 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200371 break;
372 case DISCOVERY_STARTING:
373 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300374 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200375 mgmt_discovering(hdev, 1);
376 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200377 case DISCOVERY_RESOLVING:
378 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200379 case DISCOVERY_STOPPING:
380 break;
381 }
382
383 hdev->discovery.state = state;
384}
385
Linus Torvalds1da177e2005-04-16 15:20:36 -0700386static void inquiry_cache_flush(struct hci_dev *hdev)
387{
Johan Hedberg30883512012-01-04 14:16:21 +0200388 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200389 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700390
Johan Hedberg561aafb2012-01-04 13:31:59 +0200391 list_for_each_entry_safe(p, n, &cache->all, all) {
392 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200393 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700394 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200395
396 INIT_LIST_HEAD(&cache->unknown);
397 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700398}
399
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300400struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
401 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700402{
Johan Hedberg30883512012-01-04 14:16:21 +0200403 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700404 struct inquiry_entry *e;
405
406 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
407
Johan Hedberg561aafb2012-01-04 13:31:59 +0200408 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700409 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200410 return e;
411 }
412
413 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700414}
415
Johan Hedberg561aafb2012-01-04 13:31:59 +0200416struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300417 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200418{
Johan Hedberg30883512012-01-04 14:16:21 +0200419 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200420 struct inquiry_entry *e;
421
422 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
423
424 list_for_each_entry(e, &cache->unknown, list) {
425 if (!bacmp(&e->data.bdaddr, bdaddr))
426 return e;
427 }
428
429 return NULL;
430}
431
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200432struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300433 bdaddr_t *bdaddr,
434 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200435{
436 struct discovery_state *cache = &hdev->discovery;
437 struct inquiry_entry *e;
438
439 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
440
441 list_for_each_entry(e, &cache->resolve, list) {
442 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
443 return e;
444 if (!bacmp(&e->data.bdaddr, bdaddr))
445 return e;
446 }
447
448 return NULL;
449}
450
Johan Hedberga3d4e202012-01-09 00:53:02 +0200451void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300452 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200453{
454 struct discovery_state *cache = &hdev->discovery;
455 struct list_head *pos = &cache->resolve;
456 struct inquiry_entry *p;
457
458 list_del(&ie->list);
459
460 list_for_each_entry(p, &cache->resolve, list) {
461 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300462 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200463 break;
464 pos = &p->list;
465 }
466
467 list_add(&ie->list, pos);
468}
469
/* Add or refresh the inquiry cache entry for @data->bdaddr.
 * @name_known: caller already knows the remote name (no resolution needed)
 * @ssp: optional out parameter, set true when the remote signalled SSP
 * Returns true when the entry needs no further name resolution, false
 * when its name is still unknown or allocation failed.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* An earlier response for this device already showed SSP */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI changed: re-sort the resolve list so stronger
		 * signals get their names resolved first */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* The name just became known: drop the entry from the
	 * unknown/resolve bookkeeping list */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
525
526static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
527{
Johan Hedberg30883512012-01-04 14:16:21 +0200528 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700529 struct inquiry_info *info = (struct inquiry_info *) buf;
530 struct inquiry_entry *e;
531 int copied = 0;
532
Johan Hedberg561aafb2012-01-04 13:31:59 +0200533 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700534 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200535
536 if (copied >= num)
537 break;
538
Linus Torvalds1da177e2005-04-16 15:20:36 -0700539 bacpy(&info->bdaddr, &data->bdaddr);
540 info->pscan_rep_mode = data->pscan_rep_mode;
541 info->pscan_period_mode = data->pscan_period_mode;
542 info->pscan_mode = data->pscan_mode;
543 memcpy(info->dev_class, data->dev_class, 3);
544 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200545
Linus Torvalds1da177e2005-04-16 15:20:36 -0700546 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200547 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700548 }
549
550 BT_DBG("cache %p, copied %d", cache, copied);
551 return copied;
552}
553
554static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
555{
556 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
557 struct hci_cp_inquiry cp;
558
559 BT_DBG("%s", hdev->name);
560
561 if (test_bit(HCI_INQUIRY, &hdev->flags))
562 return;
563
564 /* Start Inquiry */
565 memcpy(&cp.lap, &ir->lap, 3);
566 cp.length = ir->length;
567 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200568 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700569}
570
/* HCIINQUIRY ioctl helper: run an inquiry (or reuse fresh cached
 * results) and copy the responses back to user space.
 * @arg: user pointer to a struct hci_inquiry_req, followed by the
 *       buffer that receives the inquiry_info records.
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Only start a new inquiry when the cache is stale, empty, or the
	 * caller explicitly requested a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; allow ~2s of wall time per unit */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the (updated) request header, then the results */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
637
638/* ---- HCI ioctl helpers ---- */
639
/* Bring up the HCI device with index @dev: open the transport driver,
 * run the HCI init sequence (unless the device is raw) and notify
 * listeners. Returns 0 on success or a negative errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered; refuse to open it */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		/* Run the LE part of the init when the host supports LE */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					    HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Report the power change to mgmt unless we are still in
		 * the setup phase or mgmt does not manage this device */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
731
/* Common shutdown path shared by hci_dev_close() and device
 * unregistration: cancel pending work, flush all queues, optionally
 * reset the controller and close the transport. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Device was not up: only stop the command timer */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Report the power change to mgmt unless the device was only
	 * up because of the automatic power-off timeout */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
819
/* HCIDEVDOWN ioctl backend: bring the HCI device with id @dev down.
 *
 * Takes its own reference via hci_dev_get() and drops it before
 * returning. Returns 0 on success, -ENODEV if no such device, or the
 * error from hci_dev_do_close().
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Userspace asked for an explicit close, so the pending
	 * auto-power-off work is no longer needed. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
837
/* HCIDEVRESET ioctl backend: soft-reset a running HCI device.
 *
 * Purges pending RX/command queues, flushes the inquiry cache and the
 * connection hash, invokes the driver's optional ->flush() hook and,
 * unless the device is in raw mode, issues an HCI Reset command.
 * Returns 0 on success (or if the device was not up), -ENODEV if no
 * such device, or the error from __hci_request().
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	/* Serialize against other request issuers for this device */
	hci_req_lock(hdev);

	/* Nothing to reset if the device is not up */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* hdev->lock protects the inquiry cache and connection hash */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore flow-control state: one command slot, no in-flight
	 * ACL/SCO/LE packets. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
875
876int hci_dev_reset_stat(__u16 dev)
877{
878 struct hci_dev *hdev;
879 int ret = 0;
880
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200881 hdev = hci_dev_get(dev);
882 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700883 return -ENODEV;
884
885 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
886
887 hci_dev_put(hdev);
888
889 return ret;
890}
891
/* Handle the HCISET* ioctls that tweak a single device's settings.
 *
 * @cmd: ioctl number (HCISETAUTH, HCISETENCRYPT, HCISETSCAN, ...)
 * @arg: userspace pointer to a struct hci_dev_req
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, -ENODEV for an
 * unknown device, -EOPNOTSUPP when encryption is not supported, or
 * -EINVAL for an unknown ioctl.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits are settable from here */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two 16-bit values: MTU in the second
		 * half-word, packet count in the first (host order —
		 * NOTE(review): layout inherited from the legacy ioctl
		 * ABI; confirm against hci_sock users before changing). */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same two-halfword packing as HCISETACLMTU */
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
966
/* HCIGETDEVLIST ioctl backend: copy the list of registered HCI devices
 * (id + flags per device) to userspace.
 *
 * @arg points to a struct hci_dev_list_req whose dev_num field caps how
 * many entries the caller can receive. Returns 0 on success, -EFAULT on
 * a bad user pointer, -EINVAL for a zero/oversized dev_num, or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Cap the kernel allocation at roughly two pages */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Legacy (non-mgmt) userspace is taking over: stop the
		 * pending auto-power-off and mark the device pairable. */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Copy back only the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1013
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot for
 * the device named in di.dev_id and copy it back to userspace.
 *
 * Returns 0 on success, -EFAULT on a bad user pointer, or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Legacy (non-mgmt) userspace is taking over: stop the pending
	 * auto-power-off and mark the device pairable. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	/* Legacy ABI: bus type in the low nibble, device type shifted
	 * into the high nibble of the same byte. */
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1055
1056/* ---- Interface to HCI drivers ---- */
1057
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001058static int hci_rfkill_set_block(void *data, bool blocked)
1059{
1060 struct hci_dev *hdev = data;
1061
1062 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1063
1064 if (!blocked)
1065 return 0;
1066
1067 hci_dev_do_close(hdev);
1068
1069 return 0;
1070}
1071
/* rfkill operations for HCI devices; only blocking is acted upon. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1075
/* Workqueue handler (hdev->power_on): bring the device up after it was
 * registered or powered on via mgmt.
 *
 * If the device came up in auto-off mode, arm the delayed power-off;
 * when initial setup just finished, announce the index to mgmt.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	/* If the device cannot be opened there is nothing else to do */
	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1091
/* Delayed-work handler (hdev->power_off): close the device when the
 * auto-off timeout expires or mgmt requests power-down. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1101
/* Delayed-work handler (hdev->discov_off): discoverable timeout
 * expired — drop inquiry scan by writing Scan_Enable = page scan only,
 * and clear the stored timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1119
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001120int hci_uuids_clear(struct hci_dev *hdev)
1121{
1122 struct list_head *p, *n;
1123
1124 list_for_each_safe(p, n, &hdev->uuids) {
1125 struct bt_uuid *uuid;
1126
1127 uuid = list_entry(p, struct bt_uuid, list);
1128
1129 list_del(p);
1130 kfree(uuid);
1131 }
1132
1133 return 0;
1134}
1135
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001136int hci_link_keys_clear(struct hci_dev *hdev)
1137{
1138 struct list_head *p, *n;
1139
1140 list_for_each_safe(p, n, &hdev->link_keys) {
1141 struct link_key *key;
1142
1143 key = list_entry(p, struct link_key, list);
1144
1145 list_del(p);
1146 kfree(key);
1147 }
1148
1149 return 0;
1150}
1151
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001152int hci_smp_ltks_clear(struct hci_dev *hdev)
1153{
1154 struct smp_ltk *k, *tmp;
1155
1156 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1157 list_del(&k->list);
1158 kfree(k);
1159 }
1160
1161 return 0;
1162}
1163
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001164struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1165{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001166 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001167
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001168 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001169 if (bacmp(bdaddr, &k->bdaddr) == 0)
1170 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001171
1172 return NULL;
1173}
1174
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301175static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001176 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001177{
1178 /* Legacy key */
1179 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301180 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001181
1182 /* Debug keys are insecure so don't store them persistently */
1183 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301184 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001185
1186 /* Changed combination key and there's no previous one */
1187 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301188 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001189
1190 /* Security mode 3 case */
1191 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301192 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001193
1194 /* Neither local nor remote side had no-bonding as requirement */
1195 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301196 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001197
1198 /* Local side had dedicated bonding as requirement */
1199 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301200 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001201
1202 /* Remote side had dedicated bonding as requirement */
1203 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301204 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001205
1206 /* If none of the above criteria match, then don't store the key
1207 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301208 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001209}
1210
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001211struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001212{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001213 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001214
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001215 list_for_each_entry(k, &hdev->long_term_keys, list) {
1216 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001217 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001218 continue;
1219
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001220 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001221 }
1222
1223 return NULL;
1224}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001225
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001226struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001227 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001228{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001229 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001230
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001231 list_for_each_entry(k, &hdev->long_term_keys, list)
1232 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001233 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001234 return k;
1235
1236 return NULL;
1237}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001238
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @conn:    the connection the key came from, may be NULL (security
 *           mode 3)
 * @new_key: non-zero when the controller reported this as a new key,
 *           in which case mgmt is notified and persistence is decided
 * @val:     the HCI_LINK_KEY_SIZE key bytes
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous entry's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys are flushed when the connection drops */
	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1291
/* Store (or update) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK types are stored; other types are ignored (returns
 * 0). When @new_key is set and the key is an LTK, mgmt is notified.
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* Only long term keys (not STKs) are announced to mgmt */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1328
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001329int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1330{
1331 struct link_key *key;
1332
1333 key = hci_find_link_key(hdev, bdaddr);
1334 if (!key)
1335 return -ENOENT;
1336
1337 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1338
1339 list_del(&key->list);
1340 kfree(key);
1341
1342 return 0;
1343}
1344
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001345int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1346{
1347 struct smp_ltk *k, *tmp;
1348
1349 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1350 if (bacmp(bdaddr, &k->bdaddr))
1351 continue;
1352
1353 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1354
1355 list_del(&k->list);
1356 kfree(k);
1357 }
1358
1359 return 0;
1360}
1361
/* HCI command timer function */
/* Fires when the controller failed to answer the last HCI command in
 * time. Logs the stuck opcode (if the command skb is still around),
 * then frees the command slot and kicks the command work so queued
 * commands are not blocked forever. */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	/* Release the (single) outstanding-command credit and restart
	 * command processing. */
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1379
Szymon Janc2763eda2011-03-22 13:12:22 +01001380struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001381 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001382{
1383 struct oob_data *data;
1384
1385 list_for_each_entry(data, &hdev->remote_oob_data, list)
1386 if (bacmp(bdaddr, &data->bdaddr) == 0)
1387 return data;
1388
1389 return NULL;
1390}
1391
1392int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1393{
1394 struct oob_data *data;
1395
1396 data = hci_find_remote_oob_data(hdev, bdaddr);
1397 if (!data)
1398 return -ENOENT;
1399
1400 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1401
1402 list_del(&data->list);
1403 kfree(data);
1404
1405 return 0;
1406}
1407
1408int hci_remote_oob_data_clear(struct hci_dev *hdev)
1409{
1410 struct oob_data *data, *n;
1411
1412 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1413 list_del(&data->list);
1414 kfree(data);
1415 }
1416
1417 return 0;
1418}
1419
1420int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001421 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001422{
1423 struct oob_data *data;
1424
1425 data = hci_find_remote_oob_data(hdev, bdaddr);
1426
1427 if (!data) {
1428 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1429 if (!data)
1430 return -ENOMEM;
1431
1432 bacpy(&data->bdaddr, bdaddr);
1433 list_add(&data->list, &hdev->remote_oob_data);
1434 }
1435
1436 memcpy(data->hash, hash, sizeof(data->hash));
1437 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1438
1439 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1440
1441 return 0;
1442}
1443
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001444struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001445{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001446 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001447
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001448 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001449 if (bacmp(bdaddr, &b->bdaddr) == 0)
1450 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001451
1452 return NULL;
1453}
1454
1455int hci_blacklist_clear(struct hci_dev *hdev)
1456{
1457 struct list_head *p, *n;
1458
1459 list_for_each_safe(p, n, &hdev->blacklist) {
1460 struct bdaddr_list *b;
1461
1462 b = list_entry(p, struct bdaddr_list, list);
1463
1464 list_del(p);
1465 kfree(b);
1466 }
1467
1468 return 0;
1469}
1470
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001471int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001472{
1473 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001474
1475 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1476 return -EBADF;
1477
Antti Julku5e762442011-08-25 16:48:02 +03001478 if (hci_blacklist_lookup(hdev, bdaddr))
1479 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001480
1481 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001482 if (!entry)
1483 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001484
1485 bacpy(&entry->bdaddr, bdaddr);
1486
1487 list_add(&entry->list, &hdev->blacklist);
1488
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001489 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001490}
1491
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001492int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001493{
1494 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001495
Szymon Janc1ec918c2011-11-16 09:32:21 +01001496 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001497 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001498
1499 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001500 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001501 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001502
1503 list_del(&entry->list);
1504 kfree(entry);
1505
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001506 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001507}
1508
/* __hci_request callback: send LE Set Scan Parameters built from the
 * struct le_scan_params passed through @opt. */
static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
	struct le_scan_params *param = (struct le_scan_params *) opt;
	struct hci_cp_le_set_scan_param cp;

	/* memset first so every byte of the wire struct is defined */
	memset(&cp, 0, sizeof(cp));
	cp.type = param->type;
	cp.interval = cpu_to_le16(param->interval);
	cp.window = cpu_to_le16(param->window);

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}
1521
/* __hci_request callback: send LE Set Scan Enable with scanning on and
 * duplicate filtering enabled (@opt is unused). */
static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = 1;
	cp.filter_dup = 1;

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1532
/* Start an LE scan synchronously: set scan parameters, enable
 * scanning, and arm the delayed work that stops the scan after
 * @timeout milliseconds.
 *
 * Returns 0 on success, -EINPROGRESS if a scan is already running, or
 * the error from __hci_request().
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	/* Per-request wait budget for each of the two HCI commands */
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* param lives on this stack frame; __hci_request completes
	 * before returning, so passing its address through opt is safe. */
	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Schedule automatic scan stop after the requested duration */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1566
/* Abort a running LE scan before its timeout expires.
 *
 * Returns 0 on success or -EALREADY when no scan is active. The
 * disable command is only sent if the pending le_scan_disable work was
 * actually cancelled (otherwise that work sends it itself).
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1584
/* Delayed-work handler (hdev->le_scan_disable): scan duration elapsed;
 * send LE Set Scan Enable with an all-zero payload (enable = 0) to
 * stop scanning. */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1597
/* Work handler (hdev->le_scan): run the LE scan requested by
 * hci_le_scan() using the parameters stashed in hdev->le_scan_params.
 * Runs on system_long_wq because hci_do_le_scan() blocks. */
static void le_scan_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	hci_do_le_scan(hdev, param->type, param->interval, param->window,
		       param->timeout);
}
1608
/* Kick off an asynchronous LE scan.
 *
 * Records the scan parameters in hdev->le_scan_params and queues
 * le_scan_work on system_long_wq to do the actual (blocking) work.
 * Returns 0 on success or -EINPROGRESS when a scan is already queued
 * or running.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1628
/* Alloc HCI device.
 *
 * Allocates and initializes a zeroed struct hci_dev: default packet
 * types, locks, list heads, work items and queues.  The caller owns the
 * returned device and releases it with hci_free_dev().  Returns NULL on
 * allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Default packet types and link policy */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	/* Default sniff interval limits; presumably in baseband slots —
	 * TODO(review): confirm units against the HCI sniff mode command. */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);

	/* RX/TX/command processing and power management work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands that never get a completion event */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);
	hci_conn_hash_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1682
/* Free HCI device.
 * Drops the device reference; the actual memory is freed by the driver
 * model release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1692
/* Register HCI device.
 *
 * Allocates an index, links the device into the global device list,
 * creates its workqueue and sysfs/rfkill entries and schedules the
 * initial power-on.  Returns the assigned index (>= 0) on success or a
 * negative errno; on failure all partial state is rolled back.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A usable driver must provide at least open() and close() */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Per-device ordered workqueue for RX/TX/command processing */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Roll back index allocation and list insertion */
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1771
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): unlinks the device, closes it, tears
 * down sysfs/rfkill/workqueue, clears all stored keys and data, drops
 * the registration reference and finally releases the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index now; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Drop all persisted security material and cached remote data */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1827
/* Suspend HCI device.
 * Broadcasts HCI_DEV_SUSPEND to registered notifiers; always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1835
/* Resume HCI device.
 * Broadcasts HCI_DEV_RESUME to registered notifiers; always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1843
/* Receive frame from HCI drivers.
 *
 * The driver stores its hci_dev pointer in skb->dev.  The frame is
 * timestamped and queued on the device RX queue for hci_rx_work.
 * Returns -ENXIO (and frees the skb) if the device is missing or
 * neither up nor initializing, 0 otherwise.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
		      && !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1866
/* Reassemble a (possibly partial) HCI packet of @type from @count bytes
 * at @data, using the per-device reassembly slot @index.
 *
 * A fresh skb is allocated when the slot is empty; bytes are then copied
 * in until the expected header, and subsequently the payload length
 * taken from that header, is satisfied.  A completed frame is handed to
 * hci_recv_frame() and the slot cleared.
 *
 * Returns the number of input bytes NOT consumed (>= 0), -EILSEQ for an
 * invalid type/index, or -ENOMEM on allocation failure or when the
 * advertised payload would not fit the allocated skb.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the skb for the worst case
		 * of this packet type and expect the header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy no more than is still expected */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, learn the payload length
		 * from it and verify it fits in the allocated skb. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1975
/* Feed @count bytes of a typed HCI packet fragment into the reassembler.
 * Uses reassembly slot (type - 1), so each packet type reassembles
 * independently.  Loops until all input is consumed or an error occurs.
 * Returns leftover byte count (>= 0) or a negative errno from
 * hci_reassembly().
 */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Advance past whatever the reassembler consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1995
/* All stream input shares a single reassembly slot */
#define STREAM_REASSEMBLY 0

/* Feed a byte stream (e.g. from a UART transport) into the reassembler.
 * At the start of each frame the first byte is the packet type
 * indicator; mid-frame the type is recovered from the skb in the
 * stream reassembly slot.  Returns leftover byte count (>= 0) or a
 * negative errno from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* The type byte itself is not part of the packet */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2030
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031/* ---- Interface to upper protocols ---- */
2032
/* Register an upper-protocol callback structure on the global HCI
 * callback list.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Writers are serialized by hci_cb_list_lock */
	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2044
/* Remove a previously registered callback structure from the global HCI
 * callback list.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	/* Writers are serialized by hci_cb_list_lock */
	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2056
/* Hand one outgoing frame to the driver.
 *
 * skb->dev carries the hci_dev pointer.  The frame is timestamped,
 * copied to the monitor socket and (in promiscuous mode) to raw
 * sockets, then passed to the driver's send() callback, whose return
 * value is propagated.  Frees the skb and returns -ENODEV if no device
 * is attached.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2084
/* Send HCI command.
 *
 * Builds an HCI command packet (header + @plen parameter bytes copied
 * from @param) and queues it on the device command queue for
 * hci_cmd_work.  Returns 0 on success or -ENOMEM if the skb cannot be
 * allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Command header: opcode on the wire is little endian */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120
/* Get data from the previously sent command.
 *
 * Returns a pointer to the parameter bytes (just past the command
 * header) of the last sent command if its opcode matches @opcode,
 * otherwise NULL.
 */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	/* Stored opcode is little endian; compare in that form */
	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}
2138
/* Send ACL data */
/* Prepend an ACL header (handle+flags, data length) to @skb.
 * Note the ordering: push headroom, mark it as the transport header,
 * then write the header fields through that pointer.  @len is captured
 * before the push so dlen reflects the payload only.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2151
/* Queue an (optionally fragmented) ACL packet on @queue.
 *
 * The head skb gets an ACL header with the caller's @flags; each
 * fragment in its frag_list gets its own header with ACL_START cleared
 * and ACL_CONT set, and all fragments are appended atomically under the
 * queue lock so the packet is never interleaved with other traffic.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments follow
	 * separately via frag_list. */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2198
2199void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2200{
2201 struct hci_conn *conn = chan->conn;
2202 struct hci_dev *hdev = conn->hdev;
2203
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002204 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002205
2206 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002207
2208 hci_queue_acl(conn, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002210 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
/* Send SCO data */
/* Prepend a SCO header and queue the packet on the connection's data
 * queue, then kick the TX work item.  hdr.handle is little endian on
 * the wire; dlen is the payload length before the header push.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002234
2235/* ---- HCI TX task (outgoing data) ---- */
2236
/* HCI Connection scheduler */
/* Pick the connection of @type with pending data that has the fewest
 * in-flight packets (c->sent), and compute its quota in *quote: the
 * available controller buffer count for that link type divided evenly
 * among eligible connections (minimum 1).  Returns NULL with *quote = 0
 * if nothing is ready to send.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Prefer the connection with the fewest packets in flight */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Buffer budget depends on the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * (le_mtu == 0) share the ACL pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2297
/* Handle a TX timeout on links of @type: disconnect every connection of
 * that type which still has unacknowledged packets (c->sent != 0).
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
			       hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2318
/* Channel-aware scheduler: among all channels on connections of @type,
 * pick one whose head skb has the highest priority; within that
 * priority level, prefer the connection with the fewest in-flight
 * packets.  *quote is the buffer budget for that link type split evenly
 * across the contending channels (minimum 1).  Returns NULL if no
 * channel has queued data.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters here */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* A higher priority level resets the contest */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Within a level, least-sent connection wins */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Buffer budget depends on the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* No dedicated LE buffer pool means the ACL pool is shared */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2397
/* Anti-starvation pass for channels on connections of @type.
 *
 * Channels that transmitted in the last round get their sent counter
 * reset; channels that did not get a chance have their head skb's
 * priority promoted to HCI_PRIO_MAX - 1 so they win the next
 * hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send: clear its counter instead
			 * of promoting it */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2447
/* Number of controller data blocks consumed by the ACL payload of @skb
 * under block-based flow control: the HCI ACL header is not counted and
 * a partial trailing block rounds up to a full one.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2453
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002454static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002455{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002456 if (!test_bit(HCI_RAW, &hdev->flags)) {
2457 /* ACL tx timeout must be longer than maximum
2458 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002459 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002460 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002461 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002462 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002463}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002464
/* Packet-based ACL scheduler: drain queued ACL frames for the channels
 * hci_chan_sent() selects, spending one controller credit (acl_cnt)
 * per frame.  Runs from hci_tx_work().
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;	/* credits before this pass */
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a controller stalled with no credits for too long */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head frame: only frames of at least
		 * this priority are sent in this round. */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peeked above; dequeue only once we commit to
			 * sending this frame. */
			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;	/* one credit per frame */
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was transmitted: give starved channels a boost */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2502
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002503static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002504{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002505 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002506 struct hci_chan *chan;
2507 struct sk_buff *skb;
2508 int quote;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002509
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002510 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002511
2512 while (hdev->block_cnt > 0 &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002513 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002514 u32 priority = (skb_peek(&chan->data_q))->priority;
2515 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2516 int blocks;
2517
2518 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002519 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002520
2521 /* Stop if priority has changed */
2522 if (skb->priority < priority)
2523 break;
2524
2525 skb = skb_dequeue(&chan->data_q);
2526
2527 blocks = __get_blocks(hdev, skb);
2528 if (blocks > hdev->block_cnt)
2529 return;
2530
2531 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002532 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002533
2534 hci_send_frame(skb);
2535 hdev->acl_last_tx = jiffies;
2536
2537 hdev->block_cnt -= blocks;
2538 quote -= blocks;
2539
2540 chan->sent += blocks;
2541 chan->conn->sent += blocks;
2542 }
2543 }
2544
2545 if (cnt != hdev->block_cnt)
2546 hci_prio_recalculate(hdev, ACL_LINK);
2547}
2548
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002549static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002550{
2551 BT_DBG("%s", hdev->name);
2552
2553 if (!hci_conn_num(hdev, ACL_LINK))
2554 return;
2555
2556 switch (hdev->flow_ctl_mode) {
2557 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2558 hci_sched_acl_pkt(hdev);
2559 break;
2560
2561 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2562 hci_sched_acl_blk(hdev);
2563 break;
2564 }
2565}
2566
Linus Torvalds1da177e2005-04-16 15:20:36 -07002567/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002568static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569{
2570 struct hci_conn *conn;
2571 struct sk_buff *skb;
2572 int quote;
2573
2574 BT_DBG("%s", hdev->name);
2575
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002576 if (!hci_conn_num(hdev, SCO_LINK))
2577 return;
2578
Linus Torvalds1da177e2005-04-16 15:20:36 -07002579 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2580 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2581 BT_DBG("skb %p len %d", skb, skb->len);
2582 hci_send_frame(skb);
2583
2584 conn->sent++;
2585 if (conn->sent == ~0)
2586 conn->sent = 0;
2587 }
2588 }
2589}
2590
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002591static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002592{
2593 struct hci_conn *conn;
2594 struct sk_buff *skb;
2595 int quote;
2596
2597 BT_DBG("%s", hdev->name);
2598
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002599 if (!hci_conn_num(hdev, ESCO_LINK))
2600 return;
2601
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002602 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2603 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002604 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2605 BT_DBG("skb %p len %d", skb, skb->len);
2606 hci_send_frame(skb);
2607
2608 conn->sent++;
2609 if (conn->sent == ~0)
2610 conn->sent = 0;
2611 }
2612 }
2613}
2614
/* LE scheduler: drain queued LE frames per channel, using the LE
 * credit pool when the controller has one (le_pkts != 0) and falling
 * back to the shared ACL pool otherwise.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		/* NOTE(review): HZ * 45 appears to duplicate the value
		 * the ACL path expresses as HCI_ACL_TX_TIMEOUT (see
		 * __check_timeout()); consider using a named constant
		 * here — confirm the two are equal before changing. */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* No dedicated LE buffers: borrow from the ACL credit pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* starting credits, to detect any TX below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			/* Peeked above; dequeue only once committed */
			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Return the remaining credits to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was transmitted: give starved channels a boost */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2665
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002666static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002668 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 struct sk_buff *skb;
2670
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002671 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002672 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673
2674 /* Schedule queues and send stuff to HCI driver */
2675
2676 hci_sched_acl(hdev);
2677
2678 hci_sched_sco(hdev);
2679
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002680 hci_sched_esco(hdev);
2681
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002682 hci_sched_le(hdev);
2683
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684 /* Send next queued raw (unknown type) packet */
2685 while ((skb = skb_dequeue(&hdev->raw_q)))
2686 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687}
2688
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002689/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690
2691/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002692static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002693{
2694 struct hci_acl_hdr *hdr = (void *) skb->data;
2695 struct hci_conn *conn;
2696 __u16 handle, flags;
2697
2698 skb_pull(skb, HCI_ACL_HDR_SIZE);
2699
2700 handle = __le16_to_cpu(hdr->handle);
2701 flags = hci_flags(handle);
2702 handle = hci_handle(handle);
2703
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002704 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002705 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002706
2707 hdev->stat.acl_rx++;
2708
2709 hci_dev_lock(hdev);
2710 conn = hci_conn_hash_lookup_handle(hdev, handle);
2711 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002712
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002714 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002715
Johan Hedberg671267b2012-05-12 16:11:50 -03002716 hci_dev_lock(hdev);
2717 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2718 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2719 mgmt_device_connected(hdev, &conn->dst, conn->type,
2720 conn->dst_type, 0, NULL, 0,
2721 conn->dev_class);
2722 hci_dev_unlock(hdev);
2723
Linus Torvalds1da177e2005-04-16 15:20:36 -07002724 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002725 l2cap_recv_acldata(conn, skb, flags);
2726 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002728 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002729 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002730 }
2731
2732 kfree_skb(skb);
2733}
2734
2735/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002736static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002737{
2738 struct hci_sco_hdr *hdr = (void *) skb->data;
2739 struct hci_conn *conn;
2740 __u16 handle;
2741
2742 skb_pull(skb, HCI_SCO_HDR_SIZE);
2743
2744 handle = __le16_to_cpu(hdr->handle);
2745
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002746 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747
2748 hdev->stat.sco_rx++;
2749
2750 hci_dev_lock(hdev);
2751 conn = hci_conn_hash_lookup_handle(hdev, handle);
2752 hci_dev_unlock(hdev);
2753
2754 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002756 sco_recv_scodata(conn, skb);
2757 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002759 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002760 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761 }
2762
2763 kfree_skb(skb);
2764}
2765
/* RX work item: drain hdev->rx_q and pass each frame, in order, to the
 * monitor, to raw sockets (in promiscuous mode), and finally to the
 * handler for its packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* In raw mode the kernel does not process frames */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop */
			kfree_skb(skb);
			break;
		}
	}
}
2820
/* Command work item: submit the next queued HCI command when the
 * controller has a free command credit (cmd_cnt).  A clone of the
 * in-flight command is kept in hdev->sent_cmd so the completion event
 * can be matched against it.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Release the clone of the previously sent command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No command timeout while a reset is pending */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and reschedule ourselves */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002852
2853int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2854{
2855 /* General inquiry access code (GIAC) */
2856 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2857 struct hci_cp_inquiry cp;
2858
2859 BT_DBG("%s", hdev->name);
2860
2861 if (test_bit(HCI_INQUIRY, &hdev->flags))
2862 return -EINPROGRESS;
2863
Johan Hedberg46632622012-01-02 16:06:08 +02002864 inquiry_cache_flush(hdev);
2865
Andre Guedes2519a1f2011-11-07 11:45:24 -03002866 memset(&cp, 0, sizeof(cp));
2867 memcpy(&cp.lap, lap, sizeof(cp.lap));
2868 cp.length = length;
2869
2870 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2871}
Andre Guedes023d50492011-11-04 14:16:52 -03002872
2873int hci_cancel_inquiry(struct hci_dev *hdev)
2874{
2875 BT_DBG("%s", hdev->name);
2876
2877 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002878 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002879
2880 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2881}
Andre Guedes31f79562012-04-24 21:02:53 -03002882
2883u8 bdaddr_to_le(u8 bdaddr_type)
2884{
2885 switch (bdaddr_type) {
2886 case BDADDR_LE_PUBLIC:
2887 return ADDR_LE_DEV_PUBLIC;
2888
2889 default:
2890 /* Fallback to LE Random address type */
2891 return ADDR_LE_DEV_RANDOM;
2892 }
2893}