blob: 551df8a6f983ef08df1be23db96b7d9a421d8a04 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
Sasha Levin3df92b32012-05-27 22:36:56 +020048/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Broadcast a device event (up/down/register/...) to HCI sockets. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Called when command @cmd finished with @result.  Completes a pending
 * synchronous request, or — during the HCI_INIT phase — re-sends the last
 * init command if a spurious completion arrived for a different opcode.
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only resend when the spurious completion is a reset that
		 * the host did not actually send.
		 */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a clone of the last sent command; on clone
		 * failure we silently drop (best effort).
		 */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Wake up a waiter in __hci_req_sync(), if any. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
97
98static void hci_req_cancel(struct hci_dev *hdev, int err)
99{
100 BT_DBG("%s err 0x%2.2x", hdev->name, err);
101
102 if (hdev->req_status == HCI_REQ_PEND) {
103 hdev->req_result = err;
104 hdev->req_status = HCI_REQ_CANCELED;
105 wake_up_interruptible(&hdev->req_wait_q);
106 }
107}
108
109/* Execute request and wait for completion. */
/* Execute request and wait for completion. */
/* Runs @req (which queues HCI commands) and sleeps until hci_req_complete()
 * or hci_req_cancel() wakes us, a signal arrives, or @timeout (jiffies)
 * expires.  Caller must hold the request lock.  Returns 0 or -errno.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_dev *hdev, unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue BEFORE issuing the request so a fast
	 * completion cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on signal we return without resetting req_status,
	 * leaving it HCI_REQ_PEND — presumably intentional so a late
	 * completion still finds the pending state; confirm against callers.
	 */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Neither completed nor canceled: the wait timed out. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
152
Johan Hedberg01178cd2013-03-05 20:37:41 +0200153static int hci_req_sync(struct hci_dev *hdev,
154 void (*req)(struct hci_dev *hdev, unsigned long opt),
155 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156{
157 int ret;
158
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200159 if (!test_bit(HCI_UP, &hdev->flags))
160 return -ENETDOWN;
161
Linus Torvalds1da177e2005-04-16 15:20:36 -0700162 /* Serialize all requests */
163 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200164 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165 hci_req_unlock(hdev);
166
167 return ret;
168}
169
170static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
171{
172 BT_DBG("%s %ld", hdev->name, opt);
173
174 /* Reset device */
Gustavo F. Padovanf630cf02011-03-16 15:36:29 -0300175 set_bit(HCI_RESET, &hdev->flags);
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200176 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177}
178
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200179static void bredr_init(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700180{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200181 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
182
Linus Torvalds1da177e2005-04-16 15:20:36 -0700183 /* Read Local Supported Features */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200184 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200186 /* Read Local Version */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200187 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700188}
189
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200190static void amp_init(struct hci_dev *hdev)
191{
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200192 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
193
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200194 /* Read Local Version */
195 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300196
197 /* Read Local AMP Info */
198 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300199
200 /* Read Data Blk size */
201 hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200202}
203
/* Request callback for controller bring-up: flush driver-provided special
 * commands into the command queue, optionally reset, then run the
 * type-specific (BR/EDR vs AMP) init sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	/* Move any driver-queued init commands to the front of bring-up. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	/* Skip the reset for controllers that are reset on close instead. */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
240
Linus Torvalds1da177e2005-04-16 15:20:36 -0700241static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
242{
243 __u8 scan = opt;
244
245 BT_DBG("%s %x", hdev->name, scan);
246
247 /* Inquiry and Page scans */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200248 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700249}
250
251static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
252{
253 __u8 auth = opt;
254
255 BT_DBG("%s %x", hdev->name, auth);
256
257 /* Authentication */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200258 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700259}
260
261static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
262{
263 __u8 encrypt = opt;
264
265 BT_DBG("%s %x", hdev->name, encrypt);
266
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200267 /* Encryption */
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200268 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700269}
270
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200271static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
272{
273 __le16 policy = cpu_to_le16(opt);
274
Marcel Holtmanna418b892008-11-30 12:17:28 +0100275 BT_DBG("%s %x", hdev->name, policy);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200276
277 /* Default link policy */
278 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
279}
280
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900281/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 * Device is held on return. */
283struct hci_dev *hci_dev_get(int index)
284{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200285 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286
287 BT_DBG("%d", index);
288
289 if (index < 0)
290 return NULL;
291
292 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200293 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 if (d->id == index) {
295 hdev = hci_dev_hold(d);
296 break;
297 }
298 }
299 read_unlock(&hci_dev_list_lock);
300 return hdev;
301}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302
303/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200304
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200305bool hci_discovery_active(struct hci_dev *hdev)
306{
307 struct discovery_state *discov = &hdev->discovery;
308
Andre Guedes6fbe1952012-02-03 17:47:58 -0300309 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300310 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300311 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200312 return true;
313
Andre Guedes6fbe1952012-02-03 17:47:58 -0300314 default:
315 return false;
316 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200317}
318
Johan Hedbergff9ef572012-01-04 14:23:45 +0200319void hci_discovery_set_state(struct hci_dev *hdev, int state)
320{
321 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
322
323 if (hdev->discovery.state == state)
324 return;
325
326 switch (state) {
327 case DISCOVERY_STOPPED:
Andre Guedes7b99b652012-02-13 15:41:02 -0300328 if (hdev->discovery.state != DISCOVERY_STARTING)
329 mgmt_discovering(hdev, 0);
Johan Hedbergff9ef572012-01-04 14:23:45 +0200330 break;
331 case DISCOVERY_STARTING:
332 break;
Andre Guedes343f9352012-02-17 20:39:37 -0300333 case DISCOVERY_FINDING:
Johan Hedbergff9ef572012-01-04 14:23:45 +0200334 mgmt_discovering(hdev, 1);
335 break;
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200336 case DISCOVERY_RESOLVING:
337 break;
Johan Hedbergff9ef572012-01-04 14:23:45 +0200338 case DISCOVERY_STOPPING:
339 break;
340 }
341
342 hdev->discovery.state = state;
343}
344
Linus Torvalds1da177e2005-04-16 15:20:36 -0700345static void inquiry_cache_flush(struct hci_dev *hdev)
346{
Johan Hedberg30883512012-01-04 14:16:21 +0200347 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200348 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700349
Johan Hedberg561aafb2012-01-04 13:31:59 +0200350 list_for_each_entry_safe(p, n, &cache->all, all) {
351 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200352 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200354
355 INIT_LIST_HEAD(&cache->unknown);
356 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700357}
358
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300359struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
360 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700361{
Johan Hedberg30883512012-01-04 14:16:21 +0200362 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700363 struct inquiry_entry *e;
364
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300365 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366
Johan Hedberg561aafb2012-01-04 13:31:59 +0200367 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700368 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200369 return e;
370 }
371
372 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700373}
374
Johan Hedberg561aafb2012-01-04 13:31:59 +0200375struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300376 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200377{
Johan Hedberg30883512012-01-04 14:16:21 +0200378 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200379 struct inquiry_entry *e;
380
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300381 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200382
383 list_for_each_entry(e, &cache->unknown, list) {
384 if (!bacmp(&e->data.bdaddr, bdaddr))
385 return e;
386 }
387
388 return NULL;
389}
390
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200391struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300392 bdaddr_t *bdaddr,
393 int state)
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200394{
395 struct discovery_state *cache = &hdev->discovery;
396 struct inquiry_entry *e;
397
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300398 BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200399
400 list_for_each_entry(e, &cache->resolve, list) {
401 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
402 return e;
403 if (!bacmp(&e->data.bdaddr, bdaddr))
404 return e;
405 }
406
407 return NULL;
408}
409
Johan Hedberga3d4e202012-01-09 00:53:02 +0200410void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300411 struct inquiry_entry *ie)
Johan Hedberga3d4e202012-01-09 00:53:02 +0200412{
413 struct discovery_state *cache = &hdev->discovery;
414 struct list_head *pos = &cache->resolve;
415 struct inquiry_entry *p;
416
417 list_del(&ie->list);
418
419 list_for_each_entry(p, &cache->resolve, list) {
420 if (p->name_state != NAME_PENDING &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300421 abs(p->data.rssi) >= abs(ie->data.rssi))
Johan Hedberga3d4e202012-01-09 00:53:02 +0200422 break;
423 pos = &p->list;
424 }
425
426 list_add(&ie->list, pos);
427}
428
/* Insert or refresh an inquiry result in the cache, maintaining the
 * all/unknown/resolve lists and name_state.  @name_known says whether the
 * remote name is already known; *@ssp (if non-NULL) is set to the remote's
 * SSP support.  Returns true when the name is (now) known or pending —
 * i.e. no name-resolution round is needed for this entry.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	/* A fresh inquiry result invalidates any stored OOB data. */
	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* Keep SSP sticky: once seen as SSP-capable, report it. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* RSSI change re-sorts the pending name-resolve order. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name became known: drop the entry from unknown/resolve lists. */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
486
487static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
488{
Johan Hedberg30883512012-01-04 14:16:21 +0200489 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700490 struct inquiry_info *info = (struct inquiry_info *) buf;
491 struct inquiry_entry *e;
492 int copied = 0;
493
Johan Hedberg561aafb2012-01-04 13:31:59 +0200494 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700495 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200496
497 if (copied >= num)
498 break;
499
Linus Torvalds1da177e2005-04-16 15:20:36 -0700500 bacpy(&info->bdaddr, &data->bdaddr);
501 info->pscan_rep_mode = data->pscan_rep_mode;
502 info->pscan_period_mode = data->pscan_period_mode;
503 info->pscan_mode = data->pscan_mode;
504 memcpy(info->dev_class, data->dev_class, 3);
505 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200506
Linus Torvalds1da177e2005-04-16 15:20:36 -0700507 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200508 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700509 }
510
511 BT_DBG("cache %p, copied %d", cache, copied);
512 return copied;
513}
514
515static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
516{
517 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
518 struct hci_cp_inquiry cp;
519
520 BT_DBG("%s", hdev->name);
521
522 if (test_bit(HCI_INQUIRY, &hdev->flags))
523 return;
524
525 /* Start Inquiry */
526 memcpy(&cp.lap, &ir->lap, 3);
527 cp.length = ir->length;
528 cp.num_rsp = ir->num_rsp;
Marcel Holtmanna9de9242007-10-20 13:33:56 +0200529 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700530}
531
/* HCIINQUIRY ioctl handler: copy the request from user space, run an
 * inquiry if the cache is stale/empty or a flush was requested, then copy
 * the updated request plus cached results back to user space.
 * Returns 0 or -errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in units of 1.28s; 2000ms over-approximates that. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the request header first, then the result records. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
599
/* Build LE advertising data into @ptr (flags, TX power, local name —
 * shortened if needed to fit HCI_MAX_AD_LENGTH).  Returns the total
 * number of bytes written.  @ptr must hold HCI_MAX_AD_LENGTH bytes.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	/* Each AD structure is: length byte, type byte, payload. */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* Remaining space minus the 2-byte length/type header. */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
657
/* Rebuild the LE advertising data and push it to the controller if it
 * changed.  Returns 0 (including the unchanged case) or -errno;
 * -EINVAL when the controller is not LE capable.
 */
int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Skip the HCI command when the data did not change. */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
692
Linus Torvalds1da177e2005-04-16 15:20:36 -0700693/* ---- HCI ioctl helpers ---- */
694
/* ---- HCI ioctl helpers ---- */

/* Bring up HCI device @dev: open the transport, run the init request
 * (unless the device is raw), and on success mark it HCI_UP and notify
 * mgmt; on init failure undo everything.  Returns 0 or -errno.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Driver open callback; brings up the transport. */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		/* Already hold the request lock, so call __hci_req_sync. */
		ret = __hci_req_sync(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
783
784static int hci_dev_do_close(struct hci_dev *hdev)
785{
786 BT_DBG("%s %p", hdev->name, hdev);
787
Andre Guedes28b75a82012-02-03 17:48:00 -0300788 cancel_work_sync(&hdev->le_scan);
789
Vinicius Costa Gomes78c04c02012-09-14 16:34:46 -0300790 cancel_delayed_work(&hdev->power_off);
791
Linus Torvalds1da177e2005-04-16 15:20:36 -0700792 hci_req_cancel(hdev, ENODEV);
793 hci_req_lock(hdev);
794
795 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300796 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797 hci_req_unlock(hdev);
798 return 0;
799 }
800
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -0200801 /* Flush RX and TX works */
802 flush_work(&hdev->tx_work);
Marcel Holtmannb78752c2010-08-08 23:06:53 -0400803 flush_work(&hdev->rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200805 if (hdev->discov_timeout > 0) {
Johan Hedberge0f93092011-11-09 01:44:22 +0200806 cancel_delayed_work(&hdev->discov_off);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200807 hdev->discov_timeout = 0;
Johan Hedberg5e5282b2012-02-21 16:01:30 +0200808 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
Johan Hedberg16ab91a2011-11-07 22:16:02 +0200809 }
810
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200811 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
Johan Hedberg7d785252011-12-15 00:47:39 +0200812 cancel_delayed_work(&hdev->service_cache);
813
Andre Guedes7ba8b4b2012-02-03 17:47:59 -0300814 cancel_delayed_work_sync(&hdev->le_scan_disable);
815
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300816 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700817 inquiry_cache_flush(hdev);
818 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300819 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700820
821 hci_notify(hdev, HCI_DEV_DOWN);
822
823 if (hdev->flush)
824 hdev->flush(hdev);
825
826 /* Reset device */
827 skb_queue_purge(&hdev->cmd_q);
828 atomic_set(&hdev->cmd_cnt, 1);
Johan Hedberg8af59462012-02-03 21:29:40 +0200829 if (!test_bit(HCI_RAW, &hdev->flags) &&
Szymon Janca6c511c2012-05-23 12:35:46 +0200830 test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700831 set_bit(HCI_INIT, &hdev->flags);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200832 __hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700833 clear_bit(HCI_INIT, &hdev->flags);
834 }
835
Gustavo F. Padovanc347b762011-12-14 23:53:47 -0200836 /* flush cmd work */
837 flush_work(&hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700838
839 /* Drop queues */
840 skb_queue_purge(&hdev->rx_q);
841 skb_queue_purge(&hdev->cmd_q);
842 skb_queue_purge(&hdev->raw_q);
843
844 /* Drop last sent command */
845 if (hdev->sent_cmd) {
Vinicius Costa Gomesb79f44c2011-04-11 18:46:55 -0300846 del_timer_sync(&hdev->cmd_timer);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700847 kfree_skb(hdev->sent_cmd);
848 hdev->sent_cmd = NULL;
849 }
850
851 /* After this point our queues are empty
852 * and no tasks are scheduled. */
853 hdev->close(hdev);
854
Andrei Emeltchenkobb4b2a92012-07-19 17:03:40 +0300855 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
856 mgmt_valid_hdev(hdev)) {
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100857 hci_dev_lock(hdev);
858 mgmt_powered(hdev, 0);
859 hci_dev_unlock(hdev);
860 }
Johan Hedberg5add6af2010-12-16 10:00:37 +0200861
Linus Torvalds1da177e2005-04-16 15:20:36 -0700862 /* Clear flags */
863 hdev->flags = 0;
864
Andrei Emeltchenkoced5c332012-11-28 17:59:42 +0200865 /* Controller radio is available but is currently powered down */
866 hdev->amp_status = 0;
867
Johan Hedberge59fda82012-02-22 18:11:53 +0200868 memset(hdev->eir, 0, sizeof(hdev->eir));
Johan Hedberg09b3c3f2012-02-22 22:01:41 +0200869 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
Johan Hedberge59fda82012-02-22 18:11:53 +0200870
Linus Torvalds1da177e2005-04-16 15:20:36 -0700871 hci_req_unlock(hdev);
872
873 hci_dev_put(hdev);
874 return 0;
875}
876
877int hci_dev_close(__u16 dev)
878{
879 struct hci_dev *hdev;
880 int err;
881
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200882 hdev = hci_dev_get(dev);
883 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700884 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100885
886 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
887 cancel_delayed_work(&hdev->power_off);
888
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100890
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 hci_dev_put(hdev);
892 return err;
893}
894
895int hci_dev_reset(__u16 dev)
896{
897 struct hci_dev *hdev;
898 int ret = 0;
899
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200900 hdev = hci_dev_get(dev);
901 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700902 return -ENODEV;
903
904 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905
906 if (!test_bit(HCI_UP, &hdev->flags))
907 goto done;
908
909 /* Drop queues */
910 skb_queue_purge(&hdev->rx_q);
911 skb_queue_purge(&hdev->cmd_q);
912
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300913 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700914 inquiry_cache_flush(hdev);
915 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300916 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700917
918 if (hdev->flush)
919 hdev->flush(hdev);
920
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900921 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300922 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700923
924 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +0200925 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700926
927done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700928 hci_req_unlock(hdev);
929 hci_dev_put(hdev);
930 return ret;
931}
932
933int hci_dev_reset_stat(__u16 dev)
934{
935 struct hci_dev *hdev;
936 int ret = 0;
937
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200938 hdev = hci_dev_get(dev);
939 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 return -ENODEV;
941
942 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
943
944 hci_dev_put(hdev);
945
946 return ret;
947}
948
949int hci_dev_cmd(unsigned int cmd, void __user *arg)
950{
951 struct hci_dev *hdev;
952 struct hci_dev_req dr;
953 int err = 0;
954
955 if (copy_from_user(&dr, arg, sizeof(dr)))
956 return -EFAULT;
957
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200958 hdev = hci_dev_get(dr.dev_id);
959 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700960 return -ENODEV;
961
962 switch (cmd) {
963 case HCISETAUTH:
Johan Hedberg01178cd2013-03-05 20:37:41 +0200964 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
965 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700966 break;
967
968 case HCISETENCRYPT:
969 if (!lmp_encrypt_capable(hdev)) {
970 err = -EOPNOTSUPP;
971 break;
972 }
973
974 if (!test_bit(HCI_AUTH, &hdev->flags)) {
975 /* Auth must be enabled first */
Johan Hedberg01178cd2013-03-05 20:37:41 +0200976 err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
977 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 if (err)
979 break;
980 }
981
Johan Hedberg01178cd2013-03-05 20:37:41 +0200982 err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
983 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700984 break;
985
986 case HCISETSCAN:
Johan Hedberg01178cd2013-03-05 20:37:41 +0200987 err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
988 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700989 break;
990
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200991 case HCISETLINKPOL:
Johan Hedberg01178cd2013-03-05 20:37:41 +0200992 err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
993 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200994 break;
995
996 case HCISETLINKMODE:
997 hdev->link_mode = ((__u16) dr.dev_opt) &
998 (HCI_LM_MASTER | HCI_LM_ACCEPT);
999 break;
1000
Linus Torvalds1da177e2005-04-16 15:20:36 -07001001 case HCISETPTYPE:
1002 hdev->pkt_type = (__u16) dr.dev_opt;
1003 break;
1004
Linus Torvalds1da177e2005-04-16 15:20:36 -07001005 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001006 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1007 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001008 break;
1009
1010 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001011 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1012 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001013 break;
1014
1015 default:
1016 err = -EINVAL;
1017 break;
1018 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001019
Linus Torvalds1da177e2005-04-16 15:20:36 -07001020 hci_dev_put(hdev);
1021 return err;
1022}
1023
1024int hci_get_dev_list(void __user *arg)
1025{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001026 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001027 struct hci_dev_list_req *dl;
1028 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001029 int n = 0, size, err;
1030 __u16 dev_num;
1031
1032 if (get_user(dev_num, (__u16 __user *) arg))
1033 return -EFAULT;
1034
1035 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1036 return -EINVAL;
1037
1038 size = sizeof(*dl) + dev_num * sizeof(*dr);
1039
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001040 dl = kzalloc(size, GFP_KERNEL);
1041 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042 return -ENOMEM;
1043
1044 dr = dl->dev_req;
1045
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001046 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001047 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001048 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001049 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001050
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001051 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1052 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001053
Linus Torvalds1da177e2005-04-16 15:20:36 -07001054 (dr + n)->dev_id = hdev->id;
1055 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001056
Linus Torvalds1da177e2005-04-16 15:20:36 -07001057 if (++n >= dev_num)
1058 break;
1059 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001060 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001061
1062 dl->dev_num = n;
1063 size = sizeof(*dl) + n * sizeof(*dr);
1064
1065 err = copy_to_user(arg, dl, size);
1066 kfree(dl);
1067
1068 return err ? -EFAULT : 0;
1069}
1070
1071int hci_get_dev_info(void __user *arg)
1072{
1073 struct hci_dev *hdev;
1074 struct hci_dev_info di;
1075 int err = 0;
1076
1077 if (copy_from_user(&di, arg, sizeof(di)))
1078 return -EFAULT;
1079
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001080 hdev = hci_dev_get(di.dev_id);
1081 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001082 return -ENODEV;
1083
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001084 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001085 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001086
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001087 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1088 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001089
Linus Torvalds1da177e2005-04-16 15:20:36 -07001090 strcpy(di.name, hdev->name);
1091 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001092 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001093 di.flags = hdev->flags;
1094 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001095 if (lmp_bredr_capable(hdev)) {
1096 di.acl_mtu = hdev->acl_mtu;
1097 di.acl_pkts = hdev->acl_pkts;
1098 di.sco_mtu = hdev->sco_mtu;
1099 di.sco_pkts = hdev->sco_pkts;
1100 } else {
1101 di.acl_mtu = hdev->le_mtu;
1102 di.acl_pkts = hdev->le_pkts;
1103 di.sco_mtu = 0;
1104 di.sco_pkts = 0;
1105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001106 di.link_policy = hdev->link_policy;
1107 di.link_mode = hdev->link_mode;
1108
1109 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1110 memcpy(&di.features, &hdev->features, sizeof(di.features));
1111
1112 if (copy_to_user(arg, &di, sizeof(di)))
1113 err = -EFAULT;
1114
1115 hci_dev_put(hdev);
1116
1117 return err;
1118}
1119
1120/* ---- Interface to HCI drivers ---- */
1121
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001122static int hci_rfkill_set_block(void *data, bool blocked)
1123{
1124 struct hci_dev *hdev = data;
1125
1126 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1127
1128 if (!blocked)
1129 return 0;
1130
1131 hci_dev_do_close(hdev);
1132
1133 return 0;
1134}
1135
1136static const struct rfkill_ops hci_rfkill_ops = {
1137 .set_block = hci_rfkill_set_block,
1138};
1139
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001140static void hci_power_on(struct work_struct *work)
1141{
1142 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1143
1144 BT_DBG("%s", hdev->name);
1145
1146 if (hci_dev_open(hdev->id) < 0)
1147 return;
1148
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001149 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg19202572013-01-14 22:33:51 +02001150 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1151 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001152
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001153 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001154 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001155}
1156
1157static void hci_power_off(struct work_struct *work)
1158{
Johan Hedberg32435532011-11-07 22:16:04 +02001159 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001160 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001161
1162 BT_DBG("%s", hdev->name);
1163
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001164 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001165}
1166
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001167static void hci_discov_off(struct work_struct *work)
1168{
1169 struct hci_dev *hdev;
1170 u8 scan = SCAN_PAGE;
1171
1172 hdev = container_of(work, struct hci_dev, discov_off.work);
1173
1174 BT_DBG("%s", hdev->name);
1175
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001176 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001177
1178 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1179
1180 hdev->discov_timeout = 0;
1181
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001182 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001183}
1184
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001185int hci_uuids_clear(struct hci_dev *hdev)
1186{
Johan Hedberg48210022013-01-27 00:31:28 +02001187 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001188
Johan Hedberg48210022013-01-27 00:31:28 +02001189 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1190 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001191 kfree(uuid);
1192 }
1193
1194 return 0;
1195}
1196
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001197int hci_link_keys_clear(struct hci_dev *hdev)
1198{
1199 struct list_head *p, *n;
1200
1201 list_for_each_safe(p, n, &hdev->link_keys) {
1202 struct link_key *key;
1203
1204 key = list_entry(p, struct link_key, list);
1205
1206 list_del(p);
1207 kfree(key);
1208 }
1209
1210 return 0;
1211}
1212
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001213int hci_smp_ltks_clear(struct hci_dev *hdev)
1214{
1215 struct smp_ltk *k, *tmp;
1216
1217 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1218 list_del(&k->list);
1219 kfree(k);
1220 }
1221
1222 return 0;
1223}
1224
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001225struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1226{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001227 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001228
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001229 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001230 if (bacmp(bdaddr, &k->bdaddr) == 0)
1231 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001232
1233 return NULL;
1234}
1235
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301236static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001237 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001238{
1239 /* Legacy key */
1240 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301241 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001242
1243 /* Debug keys are insecure so don't store them persistently */
1244 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301245 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001246
1247 /* Changed combination key and there's no previous one */
1248 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301249 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001250
1251 /* Security mode 3 case */
1252 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301253 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001254
1255 /* Neither local nor remote side had no-bonding as requirement */
1256 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301257 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001258
1259 /* Local side had dedicated bonding as requirement */
1260 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301261 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001262
1263 /* Remote side had dedicated bonding as requirement */
1264 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301265 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001266
1267 /* If none of the above criteria match, then don't store the key
1268 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301269 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001270}
1271
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001272struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001273{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001274 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001275
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001276 list_for_each_entry(k, &hdev->long_term_keys, list) {
1277 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001278 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001279 continue;
1280
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001281 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001282 }
1283
1284 return NULL;
1285}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001286
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001287struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001288 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001289{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001290 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001291
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001292 list_for_each_entry(k, &hdev->long_term_keys, list)
1293 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001294 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001295 return k;
1296
1297 return NULL;
1298}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001299
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001300int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001301 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001302{
1303 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301304 u8 old_key_type;
1305 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001306
1307 old_key = hci_find_link_key(hdev, bdaddr);
1308 if (old_key) {
1309 old_key_type = old_key->type;
1310 key = old_key;
1311 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001312 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001313 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1314 if (!key)
1315 return -ENOMEM;
1316 list_add(&key->list, &hdev->link_keys);
1317 }
1318
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001319 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001320
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001321 /* Some buggy controller combinations generate a changed
1322 * combination key for legacy pairing even when there's no
1323 * previous key */
1324 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001325 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001326 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001327 if (conn)
1328 conn->key_type = type;
1329 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001330
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001331 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001332 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001333 key->pin_len = pin_len;
1334
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001335 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001336 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001337 else
1338 key->type = type;
1339
Johan Hedberg4df378a2011-04-28 11:29:03 -07001340 if (!new_key)
1341 return 0;
1342
1343 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1344
Johan Hedberg744cf192011-11-08 20:40:14 +02001345 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001346
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301347 if (conn)
1348 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001349
1350 return 0;
1351}
1352
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001353int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001354 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001355 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001356{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001357 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001358
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001359 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1360 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001361
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001362 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1363 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001364 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001365 else {
1366 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001367 if (!key)
1368 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001369 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001370 }
1371
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001372 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001373 key->bdaddr_type = addr_type;
1374 memcpy(key->val, tk, sizeof(key->val));
1375 key->authenticated = authenticated;
1376 key->ediv = ediv;
1377 key->enc_size = enc_size;
1378 key->type = type;
1379 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001380
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001381 if (!new_key)
1382 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001383
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001384 if (type & HCI_SMP_LTK)
1385 mgmt_new_ltk(hdev, key, 1);
1386
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001387 return 0;
1388}
1389
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001390int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1391{
1392 struct link_key *key;
1393
1394 key = hci_find_link_key(hdev, bdaddr);
1395 if (!key)
1396 return -ENOENT;
1397
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001398 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001399
1400 list_del(&key->list);
1401 kfree(key);
1402
1403 return 0;
1404}
1405
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001406int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1407{
1408 struct smp_ltk *k, *tmp;
1409
1410 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1411 if (bacmp(bdaddr, &k->bdaddr))
1412 continue;
1413
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001414 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001415
1416 list_del(&k->list);
1417 kfree(k);
1418 }
1419
1420 return 0;
1421}
1422
Ville Tervo6bd32322011-02-16 16:32:41 +02001423/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001424static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001425{
1426 struct hci_dev *hdev = (void *) arg;
1427
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001428 if (hdev->sent_cmd) {
1429 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1430 u16 opcode = __le16_to_cpu(sent->opcode);
1431
1432 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1433 } else {
1434 BT_ERR("%s command tx timeout", hdev->name);
1435 }
1436
Ville Tervo6bd32322011-02-16 16:32:41 +02001437 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001438 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001439}
1440
Szymon Janc2763eda2011-03-22 13:12:22 +01001441struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001442 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001443{
1444 struct oob_data *data;
1445
1446 list_for_each_entry(data, &hdev->remote_oob_data, list)
1447 if (bacmp(bdaddr, &data->bdaddr) == 0)
1448 return data;
1449
1450 return NULL;
1451}
1452
1453int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1454{
1455 struct oob_data *data;
1456
1457 data = hci_find_remote_oob_data(hdev, bdaddr);
1458 if (!data)
1459 return -ENOENT;
1460
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001461 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001462
1463 list_del(&data->list);
1464 kfree(data);
1465
1466 return 0;
1467}
1468
1469int hci_remote_oob_data_clear(struct hci_dev *hdev)
1470{
1471 struct oob_data *data, *n;
1472
1473 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1474 list_del(&data->list);
1475 kfree(data);
1476 }
1477
1478 return 0;
1479}
1480
1481int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001482 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001483{
1484 struct oob_data *data;
1485
1486 data = hci_find_remote_oob_data(hdev, bdaddr);
1487
1488 if (!data) {
1489 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1490 if (!data)
1491 return -ENOMEM;
1492
1493 bacpy(&data->bdaddr, bdaddr);
1494 list_add(&data->list, &hdev->remote_oob_data);
1495 }
1496
1497 memcpy(data->hash, hash, sizeof(data->hash));
1498 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1499
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001500 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001501
1502 return 0;
1503}
1504
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001505struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001506{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001507 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001508
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001509 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001510 if (bacmp(bdaddr, &b->bdaddr) == 0)
1511 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001512
1513 return NULL;
1514}
1515
1516int hci_blacklist_clear(struct hci_dev *hdev)
1517{
1518 struct list_head *p, *n;
1519
1520 list_for_each_safe(p, n, &hdev->blacklist) {
1521 struct bdaddr_list *b;
1522
1523 b = list_entry(p, struct bdaddr_list, list);
1524
1525 list_del(p);
1526 kfree(b);
1527 }
1528
1529 return 0;
1530}
1531
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001532int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001533{
1534 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001535
1536 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1537 return -EBADF;
1538
Antti Julku5e762442011-08-25 16:48:02 +03001539 if (hci_blacklist_lookup(hdev, bdaddr))
1540 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001541
1542 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001543 if (!entry)
1544 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001545
1546 bacpy(&entry->bdaddr, bdaddr);
1547
1548 list_add(&entry->list, &hdev->blacklist);
1549
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001550 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001551}
1552
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001553int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001554{
1555 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001556
Szymon Janc1ec918c2011-11-16 09:32:21 +01001557 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001558 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001559
1560 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001561 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001562 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001563
1564 list_del(&entry->list);
1565 kfree(entry);
1566
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001567 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001568}
1569
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001570static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1571{
1572 struct le_scan_params *param = (struct le_scan_params *) opt;
1573 struct hci_cp_le_set_scan_param cp;
1574
1575 memset(&cp, 0, sizeof(cp));
1576 cp.type = param->type;
1577 cp.interval = cpu_to_le16(param->interval);
1578 cp.window = cpu_to_le16(param->window);
1579
1580 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1581}
1582
1583static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1584{
1585 struct hci_cp_le_set_scan_enable cp;
1586
1587 memset(&cp, 0, sizeof(cp));
1588 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001589 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001590
1591 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1592}
1593
1594static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001595 u16 window, int timeout)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001596{
1597 long timeo = msecs_to_jiffies(3000);
1598 struct le_scan_params param;
1599 int err;
1600
1601 BT_DBG("%s", hdev->name);
1602
1603 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1604 return -EINPROGRESS;
1605
1606 param.type = type;
1607 param.interval = interval;
1608 param.window = window;
1609
1610 hci_req_lock(hdev);
1611
Johan Hedberg01178cd2013-03-05 20:37:41 +02001612 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1613 timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001614 if (!err)
Johan Hedberg01178cd2013-03-05 20:37:41 +02001615 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001616
1617 hci_req_unlock(hdev);
1618
1619 if (err < 0)
1620 return err;
1621
Johan Hedberg46818ed2013-01-14 22:33:52 +02001622 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1623 msecs_to_jiffies(timeout));
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001624
1625 return 0;
1626}
1627
/* Cancel a running LE scan before its auto-disable timer fires.
 *
 * Returns -EALREADY when no scan is active, 0 otherwise.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	/* NOTE(review): if cancel_delayed_work() returns false the disable
	 * work is presumably already executing and sends the disable command
	 * itself — confirm against le_scan_disable_work(). */
	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1645
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001646static void le_scan_disable_work(struct work_struct *work)
1647{
1648 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001649 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001650 struct hci_cp_le_set_scan_enable cp;
1651
1652 BT_DBG("%s", hdev->name);
1653
1654 memset(&cp, 0, sizeof(cp));
1655
1656 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1657}
1658
Andre Guedes28b75a82012-02-03 17:48:00 -03001659static void le_scan_work(struct work_struct *work)
1660{
1661 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1662 struct le_scan_params *param = &hdev->le_scan_params;
1663
1664 BT_DBG("%s", hdev->name);
1665
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001666 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1667 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001668}
1669
1670int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001671 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001672{
1673 struct le_scan_params *param = &hdev->le_scan_params;
1674
1675 BT_DBG("%s", hdev->name);
1676
Johan Hedbergf1550472012-10-24 21:12:03 +03001677 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1678 return -ENOTSUPP;
1679
Andre Guedes28b75a82012-02-03 17:48:00 -03001680 if (work_busy(&hdev->le_scan))
1681 return -EINPROGRESS;
1682
1683 param->type = type;
1684 param->interval = interval;
1685 param->window = window;
1686 param->timeout = timeout;
1687
1688 queue_work(system_long_wq, &hdev->le_scan);
1689
1690 return 0;
1691}
1692
/* Alloc HCI device */
/* Allocate and initialize a new hci_dev structure.
 *
 * Sets conservative defaults (basic packet types, no IO capability,
 * invalid TX power levels), initializes all locks, lists, work items,
 * queues and the command timer, and registers the sysfs type.  The
 * caller owns the returned device and releases it with hci_free_dev().
 *
 * Returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Minimal guaranteed packet types; the controller's real
	 * capabilities are discovered during init. */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	/* Sniff interval bounds, in baseband slots (0.625 ms units). */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Watchdog for commands the controller never answers. */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1748
/* Free HCI device */
/* Drop the caller's reference to an hci_dev allocated with
 * hci_alloc_dev().  The structure itself is freed by the device-core
 * release callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1758
/* Register HCI device */
/* Register an hci_dev with the core: allocate an index, add it to the
 * global device list, create its workqueues, sysfs entries and rfkill
 * switch, and queue the initial power-on.
 *
 * Returns the assigned device id (>= 0) on success or a negative errno;
 * on failure all partially-created resources are torn down again.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver without open/close callbacks cannot be driven. */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Per-device high-priority queue for RX/TX/command processing. */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	/* Separate queue for blocking request sequences (e.g. power on),
	 * so they cannot stall the main workqueue. */
	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		/* rfkill is optional: registration failure just leaves
		 * the device without a kill switch. */
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1847
/* Unregister HCI device */
/* Tear down a registered hci_dev: remove it from the global list, close
 * it, flush pending work, notify mgmt/userspace, release sysfs, rfkill
 * and workqueues, clear all stored keys and data, and finally drop the
 * reference taken at registration and free the device index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Mark the device as going away so concurrent paths back off. */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save the index: hdev may be freed by hci_dev_put() below. */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames. */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	/* Only announce removal to mgmt if setup/init had completed. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1906
/* Suspend HCI device */
/* Broadcast a suspend notification for this device; no state is changed
 * here, listeners react to the HCI_DEV_SUSPEND event.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1914
/* Resume HCI device */
/* Broadcast a resume notification for this device; counterpart of
 * hci_suspend_dev().  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1922
Marcel Holtmann76bca882009-11-18 00:40:39 +01001923/* Receive frame from HCI drivers */
1924int hci_recv_frame(struct sk_buff *skb)
1925{
1926 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1927 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001928 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001929 kfree_skb(skb);
1930 return -ENXIO;
1931 }
1932
Jorrit Schippersd82603c2012-12-27 17:33:02 +01001933 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01001934 bt_cb(skb)->incoming = 1;
1935
1936 /* Time stamp */
1937 __net_timestamp(skb);
1938
Marcel Holtmann76bca882009-11-18 00:40:39 +01001939 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001940 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001941
Marcel Holtmann76bca882009-11-18 00:40:39 +01001942 return 0;
1943}
1944EXPORT_SYMBOL(hci_recv_frame);
1945
/* Reassemble a stream of bytes into complete HCI frames.
 *
 * Accumulates up to @count bytes of @data into hdev->reassembly[@index].
 * The per-skb control block's 'expect' field tracks how many bytes are
 * still missing: first the packet header, then — once the header is
 * complete — the payload length read from it.  A finished frame is
 * delivered via hci_recv_frame() and the slot is cleared.
 *
 * Returns the number of input bytes NOT consumed (callers loop on this),
 * -EILSEQ for an invalid type/index, or -ENOMEM on allocation failure or
 * when the advertised payload would overflow the buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new frame: allocate the maximum possible
		 * size for this packet type and expect its header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		/* Copy at most what the current header/payload still needs. */
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Header just completed? Read the payload length from it
		 * and make sure it fits the allocated buffer. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2054
Marcel Holtmannef222012007-07-11 06:42:04 +02002055int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2056{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302057 int rem = 0;
2058
Marcel Holtmannef222012007-07-11 06:42:04 +02002059 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2060 return -EILSEQ;
2061
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002062 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002063 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302064 if (rem < 0)
2065 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002066
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302067 data += (count - rem);
2068 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002069 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002070
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302071 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002072}
2073EXPORT_SYMBOL(hci_recv_fragment);
2074
#define STREAM_REASSEMBLY 0

/* Feed a raw byte stream (e.g. from a UART transport) into reassembly.
 *
 * Unlike hci_recv_fragment(), the packet type is carried in-band: the
 * first byte of every frame is the HCI packet type indicator.  A single
 * shared reassembly slot (STREAM_REASSEMBLY) is used, so the stream must
 * not interleave frames.
 *
 * Returns the number of unconsumed bytes from the last pass or a
 * negative error from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* Skip the type byte; it is not part of the frame. */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2109
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110/* ---- Interface to upper protocols ---- */
2111
/* Register an upper-protocol callback structure with the HCI core.
 * Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2123
/* Remove a previously registered upper-protocol callback structure.
 * Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2135
/* Hand one outgoing frame to the driver.
 *
 * Timestamps the skb, mirrors it to the monitor interface (and to raw
 * sockets when in promiscuous mode), detaches socket ownership and then
 * calls the driver's send callback.  Returns the driver's result, or
 * -ENODEV (frame freed) when no device is attached to the skb.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2163
/* Send HCI command */
/* Build an HCI command packet (header + optional parameters) and queue
 * it on the command queue for the command work to transmit.
 *
 * @opcode: HCI opcode (OGF/OCF), stored little-endian on the wire.
 * @plen:   length of @param in bytes; @param may be NULL when 0.
 *
 * Returns 0 on success or -ENOMEM if the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* During init, remember the last command so its completion can
	 * drive the init state machine. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199
2200/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002201void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202{
2203 struct hci_command_hdr *hdr;
2204
2205 if (!hdev->sent_cmd)
2206 return NULL;
2207
2208 hdr = (void *) hdev->sent_cmd->data;
2209
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002210 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002211 return NULL;
2212
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002213 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214
2215 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2216}
2217
2218/* Send ACL data */
2219static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2220{
2221 struct hci_acl_hdr *hdr;
2222 int len = skb->len;
2223
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002224 skb_push(skb, HCI_ACL_HDR_SIZE);
2225 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002226 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002227 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2228 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002229}
2230
/* Add ACL headers to an skb (and its fragment list) and append all
 * pieces to @queue.
 *
 * For BR/EDR devices the connection handle is used; for AMP devices the
 * channel handle.  The first fragment keeps the caller's @flags, all
 * subsequent fragments are retagged as continuations (ACL_CONT), and
 * fragments are queued atomically under the queue lock so the TX path
 * never sees a partial frame.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict the head skb to its linear part; fragments are
	 * handled individually via frag_list below. */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments must not carry the start flag. */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2289
/* Queue ACL data on a channel's data queue and kick the TX work.
 * The skb (including any fragment list) is consumed.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002302
2303/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002304void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002305{
2306 struct hci_dev *hdev = conn->hdev;
2307 struct hci_sco_hdr hdr;
2308
2309 BT_DBG("%s len %d", hdev->name, skb->len);
2310
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002311 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002312 hdr.dlen = skb->len;
2313
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002314 skb_push(skb, HCI_SCO_HDR_SIZE);
2315 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002316 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317
2318 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002319 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002320
Linus Torvalds1da177e2005-04-16 15:20:36 -07002321 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002322 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002323}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324
2325/* ---- HCI TX task (outgoing data) ---- */
2326
2327/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest packets
 * in flight, and compute its fair-share TX quota.
 *
 * @quote: out parameter; number of packets the chosen connection may
 *         send this round (controller buffer count divided by the
 *         number of eligible connections, minimum 1), or 0 when no
 *         connection is eligible.
 *
 * Returns the selected connection or NULL.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins, for fairness. */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early. */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the controller buffer budget for this link type. */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL buffers. */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2387
/* TX timeout handler: the controller stopped acknowledging packets on
 * links of @type, so disconnect every connection of that type that
 * still has unacknowledged packets outstanding.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2408
/* Pick the HCI channel of the given link type that should transmit next.
 *
 * Scans all connections of @type in BT_CONNECTED/BT_CONFIG state and,
 * among channels with queued data, selects one whose head skb has the
 * highest priority, breaking ties by the lowest per-connection 'sent'
 * count so the least-served connection wins.  On success *quote is set
 * to that channel's share of the free controller credits (at least 1).
 * Returns NULL when no channel has data queued.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	/* num: channels seen at cur_prio; min: lowest conn->sent among them */
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				/* Higher priority found: restart the tally */
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on the least-served connection */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Map the link type to its controller buffer pool */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		/* LE shares the ACL pool when no dedicated LE buffers exist */
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Divide the credits evenly; always grant at least one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2490
/* Age channel priorities after a scheduling round.
 *
 * Channels that transmitted during the last round have their 'sent'
 * counter cleared; channels that were starved get the priority of their
 * head packet promoted to HCI_PRIO_MAX - 1 so they eventually win a
 * future hci_chan_sent() selection.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Served this round: reset, leave priority alone */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved: bump to the highest dynamic priority */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* All connections of this type visited: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2540
/* Number of controller data blocks an outgoing frame consumes:
 * payload length (skb minus the ACL header) divided by the
 * controller's block size, rounded up.
 */
static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
{
	/* Calculate count of blocks used by this packet */
	return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
}
2546
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002547static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002548{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002549 if (!test_bit(HCI_RAW, &hdev->flags)) {
2550 /* ACL tx timeout must be longer than maximum
2551 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002552 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002553 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002554 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002556}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557
/* Drain queued ACL data in packet-based flow control mode.
 *
 * Repeatedly picks the most deserving channel via hci_chan_sent() and
 * sends up to its quota, stopping early if the head packet's priority
 * drops below the priority the quota was granted at.  Re-ages channel
 * priorities afterwards if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link before queueing more data */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller credit consumed */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2595
/* Drain queued ACL data in block-based flow control mode.
 *
 * Like hci_sched_acl_pkt() but accounts controller buffer usage in
 * data blocks (see __get_blocks()) rather than whole packets, and
 * schedules AMP links on AMP controllers.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	/* AMP controllers carry AMP links, BR/EDR controllers ACL links */
	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): returning here discards the already
			 * dequeued skb without freeing or requeueing it —
			 * looks like a leak/packet drop when a frame needs
			 * more blocks than remain; confirm against upstream
			 * history before changing.
			 */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Charge the frame's block count, not one packet */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
2649
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002650static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002651{
2652 BT_DBG("%s", hdev->name);
2653
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002654 /* No ACL link over BR/EDR controller */
2655 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2656 return;
2657
2658 /* No AMP link over AMP controller */
2659 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002660 return;
2661
2662 switch (hdev->flow_ctl_mode) {
2663 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2664 hci_sched_acl_pkt(hdev);
2665 break;
2666
2667 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2668 hci_sched_acl_blk(hdev);
2669 break;
2670 }
2671}
2672
Linus Torvalds1da177e2005-04-16 15:20:36 -07002673/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002674static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675{
2676 struct hci_conn *conn;
2677 struct sk_buff *skb;
2678 int quote;
2679
2680 BT_DBG("%s", hdev->name);
2681
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002682 if (!hci_conn_num(hdev, SCO_LINK))
2683 return;
2684
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2686 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2687 BT_DBG("skb %p len %d", skb, skb->len);
2688 hci_send_frame(skb);
2689
2690 conn->sent++;
2691 if (conn->sent == ~0)
2692 conn->sent = 0;
2693 }
2694 }
2695}
2696
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002697static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002698{
2699 struct hci_conn *conn;
2700 struct sk_buff *skb;
2701 int quote;
2702
2703 BT_DBG("%s", hdev->name);
2704
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002705 if (!hci_conn_num(hdev, ESCO_LINK))
2706 return;
2707
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002708 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2709 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002710 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2711 BT_DBG("skb %p len %d", skb, skb->len);
2712 hci_send_frame(skb);
2713
2714 conn->sent++;
2715 if (conn->sent == ~0)
2716 conn->sent = 0;
2717 }
2718 }
2719}
2720
/* Drain queued LE data.
 *
 * Uses the dedicated LE buffer pool when the controller advertises one
 * (hdev->le_pkts), otherwise borrows credits from the ACL pool, and
 * applies the same priority-aware channel scheduling as ACL.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Fall back to the ACL pool when there are no LE buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember starting credit for recalculation below */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credit back to the pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2771
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002772static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002773{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002774 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002775 struct sk_buff *skb;
2776
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002777 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002778 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002779
2780 /* Schedule queues and send stuff to HCI driver */
2781
2782 hci_sched_acl(hdev);
2783
2784 hci_sched_sco(hdev);
2785
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002786 hci_sched_esco(hdev);
2787
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002788 hci_sched_le(hdev);
2789
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790 /* Send next queued raw (unknown type) packet */
2791 while ((skb = skb_dequeue(&hdev->raw_q)))
2792 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793}
2794
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002795/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002796
2797/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002798static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002799{
2800 struct hci_acl_hdr *hdr = (void *) skb->data;
2801 struct hci_conn *conn;
2802 __u16 handle, flags;
2803
2804 skb_pull(skb, HCI_ACL_HDR_SIZE);
2805
2806 handle = __le16_to_cpu(hdr->handle);
2807 flags = hci_flags(handle);
2808 handle = hci_handle(handle);
2809
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002810 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002811 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002812
2813 hdev->stat.acl_rx++;
2814
2815 hci_dev_lock(hdev);
2816 conn = hci_conn_hash_lookup_handle(hdev, handle);
2817 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002818
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002820 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002821
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002823 l2cap_recv_acldata(conn, skb, flags);
2824 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002826 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002827 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 }
2829
2830 kfree_skb(skb);
2831}
2832
2833/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002834static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002835{
2836 struct hci_sco_hdr *hdr = (void *) skb->data;
2837 struct hci_conn *conn;
2838 __u16 handle;
2839
2840 skb_pull(skb, HCI_SCO_HDR_SIZE);
2841
2842 handle = __le16_to_cpu(hdr->handle);
2843
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002844 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845
2846 hdev->stat.sco_rx++;
2847
2848 hci_dev_lock(hdev);
2849 conn = hci_conn_hash_lookup_handle(hdev, handle);
2850 hci_dev_unlock(hdev);
2851
2852 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002854 sco_recv_scodata(conn, skb);
2855 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002857 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002858 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859 }
2860
2861 kfree_skb(skb);
2862}
2863
/* RX work: deliver every packet queued on hdev->rx_q.
 *
 * Each skb is first copied to the monitor channel and, in promiscuous
 * mode, to raw HCI sockets; it is then dispatched by packet type to
 * the event, ACL or SCO handler.  All packets are dropped in raw mode,
 * and data packets are dropped while the device is still initializing.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space owns the device, stack stays out */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: discard */
			kfree_skb(skb);
			break;
		}
	}
}
2918
/* CMD work: send the next queued HCI command when the controller has a
 * free command credit (hdev->cmd_cnt).
 *
 * A clone of the command is kept in hdev->sent_cmd for matching against
 * its completion event, and the command timer is re-armed unless a
 * reset is in flight.  If cloning fails, the command is put back at the
 * head of the queue and the work is rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the reference kept for the previous command */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002950
2951int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2952{
2953 /* General inquiry access code (GIAC) */
2954 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2955 struct hci_cp_inquiry cp;
2956
2957 BT_DBG("%s", hdev->name);
2958
2959 if (test_bit(HCI_INQUIRY, &hdev->flags))
2960 return -EINPROGRESS;
2961
Johan Hedberg46632622012-01-02 16:06:08 +02002962 inquiry_cache_flush(hdev);
2963
Andre Guedes2519a1f2011-11-07 11:45:24 -03002964 memset(&cp, 0, sizeof(cp));
2965 memcpy(&cp.lap, lap, sizeof(cp.lap));
2966 cp.length = length;
2967
2968 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2969}
Andre Guedes023d50492011-11-04 14:16:52 -03002970
2971int hci_cancel_inquiry(struct hci_dev *hdev)
2972{
2973 BT_DBG("%s", hdev->name);
2974
2975 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002976 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002977
2978 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2979}
Andre Guedes31f79562012-04-24 21:02:53 -03002980
2981u8 bdaddr_to_le(u8 bdaddr_type)
2982{
2983 switch (bdaddr_type) {
2984 case BDADDR_LE_PUBLIC:
2985 return ADDR_LE_DEV_PUBLIC;
2986
2987 default:
2988 /* Fallback to LE Random address type */
2989 return ADDR_LE_DEV_RANDOM;
2990 }
2991}