/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

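/* Synchronous HCI requests share a small state machine on struct hci_dev:
 * req_status moves from HCI_REQ_PEND to HCI_REQ_DONE (or HCI_REQ_CANCELED)
 * and the waiter sleeping on req_wait_q is woken with the outcome in
 * req_result. hci_req_complete() is called from the event path once the
 * controller has answered the last issued command.
 */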
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

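/* Locked wrapper around __hci_request(): refuses requests while the device
 * is down and serializes all callers on the request lock. A typical caller
 * provides a small req callback that only issues the command; a sketch,
 * modelled on hci_scan_req() below (my_req is hypothetical):
 *
 *	static void my_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		__u8 mode = opt;
 *		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &mode);
 *	}
 *
 *	err = hci_request(hdev, my_req, SCAN_PAGE, HCI_INIT_TIMEOUT);
 */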
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

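/* Controller bring-up: flush any driver-queued vendor commands first, issue
 * a reset unless the controller is instead reset on close
 * (HCI_QUIRK_RESET_ON_CLOSE), and finally run the type-specific init:
 * BR/EDR controllers use packet-based flow control, AMP controllers
 * block-based.
 */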
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

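/* The discovery cache in hdev->discovery keeps every inquiry result on the
 * "all" list, and additionally threads entries whose remote name is still
 * missing onto "unknown", or onto "resolve" while name resolution is being
 * scheduled for them.
 */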
bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

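/* Re-insert an entry at its proper position in the resolve list: entries
 * already being resolved (NAME_PENDING) stay at the front, the rest are
 * kept ordered by ascending |RSSI| so the strongest devices get their
 * names resolved first.
 */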
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

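/* Add or refresh an inquiry result in the cache. Returns true if the entry
 * needs no further name resolution (its remote name is already known),
 * false otherwise.
 */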
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

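/* Back end of the HCIINQUIRY ioctl: run an inquiry if the cache is stale
 * (or a flush was requested) and copy the cached results back to user
 * space, which supplies a struct hci_inquiry_req immediately followed by
 * room for the inquiry_info entries. A user space caller might look
 * roughly like this (sketch only, error handling omitted):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[255];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 255,
 *			  .flags = IREQ_CACHE_FLUSH,
 *			  .lap = { 0x33, 0x8b, 0x9e } } };
 *
 *	ioctl(hci_socket_fd, HCIINQUIRY, &buf);
 */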
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

/* ---- HCI ioctl helpers ---- */

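/* Bring up the device for the HCIDEVUP ioctl: bail out early if the device
 * is being unregistered, rfkill-blocked or already up, then call the
 * driver's open() and, unless the device is treated as raw, run the HCI
 * init sequence before declaring it HCI_UP.
 */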
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

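/* Common teardown used by hci_dev_close(), rfkill blocking and the delayed
 * power-off work: cancel pending work, flush the queues, reset the
 * controller if the quirks require it, and finally call the driver's
 * close().
 */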
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

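/* Device control ioctls. For HCISETACLMTU/HCISETSCOMTU user space packs two
 * 16-bit values into the 32-bit dev_opt; the pointer arithmetic below reads
 * them back as two consecutive __u16 words, the one at offset 1 being the
 * MTU and the one at offset 0 the packet count.
 */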
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu  = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu  = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		di.acl_mtu  = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu  = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

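/* Power management work. If the controller comes up while still in the
 * HCI_AUTO_OFF state (nothing has claimed it via mgmt yet), a delayed
 * power-off is scheduled so unused adapters do not stay powered.
 */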
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

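/* Decide whether a newly created link key should be stored persistently:
 * legacy keys are always kept, debug keys never, and for SSP keys the
 * decision depends on the authentication requirements both sides used
 * during pairing.
 */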
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list) {
		if (k->ediv != ediv ||
		    memcmp(rand, k->rand, sizeof(k->rand)))
			continue;

		return k;
	}

	return NULL;
}

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
				     u8 addr_type)
{
	struct smp_ltk *k;

	list_for_each_entry(k, &hdev->long_term_keys, list)
		if (addr_type == k->bdaddr_type &&
		    bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

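/* Store a BR/EDR link key, reusing an existing entry for the same bdaddr
 * if there is one. For genuinely new keys the persistence decision is made
 * via hci_persistent_key() and user space is notified through mgmt.
 */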
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %pMR", hdev->name, bdaddr);

	list_del(&key->list);
	kfree(key);

	return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct smp_ltk *k, *tmp;

	list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
		if (bacmp(bdaddr, &k->bdaddr))
			continue;

		BT_DBG("%s removing %pMR", hdev->name, bdaddr);

		list_del(&k->list);
		kfree(k);
	}

	return 0;
}

Ville Tervo6bd32322011-02-16 16:32:41 +02001326/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001327static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001328{
1329 struct hci_dev *hdev = (void *) arg;
1330
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001331 if (hdev->sent_cmd) {
1332 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1333 u16 opcode = __le16_to_cpu(sent->opcode);
1334
1335 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1336 } else {
1337 BT_ERR("%s command tx timeout", hdev->name);
1338 }
1339
Ville Tervo6bd32322011-02-16 16:32:41 +02001340 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001341 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001342}
1343
Szymon Janc2763eda2011-03-22 13:12:22 +01001344struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001345 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001346{
1347 struct oob_data *data;
1348
1349 list_for_each_entry(data, &hdev->remote_oob_data, list)
1350 if (bacmp(bdaddr, &data->bdaddr) == 0)
1351 return data;
1352
1353 return NULL;
1354}
1355
1356int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1357{
1358 struct oob_data *data;
1359
1360 data = hci_find_remote_oob_data(hdev, bdaddr);
1361 if (!data)
1362 return -ENOENT;
1363
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001364 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001365
1366 list_del(&data->list);
1367 kfree(data);
1368
1369 return 0;
1370}
1371
1372int hci_remote_oob_data_clear(struct hci_dev *hdev)
1373{
1374 struct oob_data *data, *n;
1375
1376 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1377 list_del(&data->list);
1378 kfree(data);
1379 }
1380
1381 return 0;
1382}
1383
1384int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001385 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001386{
1387 struct oob_data *data;
1388
1389 data = hci_find_remote_oob_data(hdev, bdaddr);
1390
1391 if (!data) {
1392 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1393 if (!data)
1394 return -ENOMEM;
1395
1396 bacpy(&data->bdaddr, bdaddr);
1397 list_add(&data->list, &hdev->remote_oob_data);
1398 }
1399
1400 memcpy(data->hash, hash, sizeof(data->hash));
1401 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1402
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001403 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001404
1405 return 0;
1406}
1407
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001408struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001409{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001410 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001411
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001412 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001413 if (bacmp(bdaddr, &b->bdaddr) == 0)
1414 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001415
1416 return NULL;
1417}
1418
1419int hci_blacklist_clear(struct hci_dev *hdev)
1420{
1421 struct list_head *p, *n;
1422
1423 list_for_each_safe(p, n, &hdev->blacklist) {
1424 struct bdaddr_list *b;
1425
1426 b = list_entry(p, struct bdaddr_list, list);
1427
1428 list_del(p);
1429 kfree(b);
1430 }
1431
1432 return 0;
1433}
1434
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001435int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001436{
1437 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001438
1439 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1440 return -EBADF;
1441
Antti Julku5e762442011-08-25 16:48:02 +03001442 if (hci_blacklist_lookup(hdev, bdaddr))
1443 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001444
1445 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001446 if (!entry)
1447 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001448
1449 bacpy(&entry->bdaddr, bdaddr);
1450
1451 list_add(&entry->list, &hdev->blacklist);
1452
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001453 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001454}
1455
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001456int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001457{
1458 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001459
Szymon Janc1ec918c2011-11-16 09:32:21 +01001460 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001461 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001462
1463 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001464 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001465 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001466
1467 list_del(&entry->list);
1468 kfree(entry);
1469
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001470 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001471}
1472
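/* Request callback: program the controller's LE scan parameters before
 * scanning is enabled */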
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001473static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1474{
1475 struct le_scan_params *param = (struct le_scan_params *) opt;
1476 struct hci_cp_le_set_scan_param cp;
1477
1478 memset(&cp, 0, sizeof(cp));
1479 cp.type = param->type;
1480 cp.interval = cpu_to_le16(param->interval);
1481 cp.window = cpu_to_le16(param->window);
1482
1483 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1484}
1485
1486static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1487{
1488 struct hci_cp_le_set_scan_enable cp;
1489
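 /* Enable scanning and let the controller filter out duplicate
  * advertising reports */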
1490 memset(&cp, 0, sizeof(cp));
1491 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001492 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001493
1494 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1495}
1496
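/* Synchronously program the scan parameters and enable LE scanning,
 * then schedule the delayed work that disables the scan again after
 * timeout milliseconds */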
1497static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001498 u16 window, int timeout)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001499{
1500 long timeo = msecs_to_jiffies(3000);
1501 struct le_scan_params param;
1502 int err;
1503
1504 BT_DBG("%s", hdev->name);
1505
1506 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1507 return -EINPROGRESS;
1508
1509 param.type = type;
1510 param.interval = interval;
1511 param.window = window;
1512
1513 hci_req_lock(hdev);
1514
1515 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001516 timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001517 if (!err)
1518 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1519
1520 hci_req_unlock(hdev);
1521
1522 if (err < 0)
1523 return err;
1524
1525 schedule_delayed_work(&hdev->le_scan_disable,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001526 msecs_to_jiffies(timeout));
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001527
1528 return 0;
1529}
1530
Andre Guedes7dbfac12012-03-15 16:52:07 -03001531int hci_cancel_le_scan(struct hci_dev *hdev)
1532{
1533 BT_DBG("%s", hdev->name);
1534
1535 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1536 return -EALREADY;
1537
1538 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1539 struct hci_cp_le_set_scan_enable cp;
1540
1541 /* Send HCI command to disable LE Scan */
1542 memset(&cp, 0, sizeof(cp));
1543 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1544 }
1545
1546 return 0;
1547}
1548
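/* Delayed work: a zeroed hci_cp_le_set_scan_enable (enable = 0) turns
 * LE scanning back off once the scan timeout has expired */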
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001549static void le_scan_disable_work(struct work_struct *work)
1550{
1551 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001552 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001553 struct hci_cp_le_set_scan_enable cp;
1554
1555 BT_DBG("%s", hdev->name);
1556
1557 memset(&cp, 0, sizeof(cp));
1558
1559 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1560}
1561
Andre Guedes28b75a82012-02-03 17:48:00 -03001562static void le_scan_work(struct work_struct *work)
1563{
1564 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1565 struct le_scan_params *param = &hdev->le_scan_params;
1566
1567 BT_DBG("%s", hdev->name);
1568
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001569 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1570 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001571}
1572
1573int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001574 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001575{
1576 struct le_scan_params *param = &hdev->le_scan_params;
1577
1578 BT_DBG("%s", hdev->name);
1579
1580 if (work_busy(&hdev->le_scan))
1581 return -EINPROGRESS;
1582
1583 param->type = type;
1584 param->interval = interval;
1585 param->window = window;
1586 param->timeout = timeout;
1587
1588 queue_work(system_long_wq, &hdev->le_scan);
1589
1590 return 0;
1591}
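/*
 * Usage sketch (illustrative values, not taken from this file): start
 * an active scan (type 0x01) for 10 seconds, with interval and window
 * of 0x0010 (10 ms, in 0.625 ms units):
 *
 *	err = hci_le_scan(hdev, 0x01, 0x0010, 0x0010, 10000);
 */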
1592
David Herrmann9be0dab2012-04-22 14:39:57 +02001593/* Alloc HCI device */
1594struct hci_dev *hci_alloc_dev(void)
1595{
1596 struct hci_dev *hdev;
1597
1598 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1599 if (!hdev)
1600 return NULL;
1601
David Herrmannb1b813d2012-04-22 14:39:58 +02001602 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1603 hdev->esco_type = (ESCO_HV1);
1604 hdev->link_mode = (HCI_LM_ACCEPT);
1605 hdev->io_capability = 0x03; /* No Input No Output */
1606
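 /* Default sniff mode interval limits, in baseband slots of 0.625 ms
  * (80 = 50 ms, 800 = 500 ms) */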
David Herrmannb1b813d2012-04-22 14:39:58 +02001607 hdev->sniff_max_interval = 800;
1608 hdev->sniff_min_interval = 80;
1609
1610 mutex_init(&hdev->lock);
1611 mutex_init(&hdev->req_lock);
1612
1613 INIT_LIST_HEAD(&hdev->mgmt_pending);
1614 INIT_LIST_HEAD(&hdev->blacklist);
1615 INIT_LIST_HEAD(&hdev->uuids);
1616 INIT_LIST_HEAD(&hdev->link_keys);
1617 INIT_LIST_HEAD(&hdev->long_term_keys);
1618 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03001619 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02001620
1621 INIT_WORK(&hdev->rx_work, hci_rx_work);
1622 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1623 INIT_WORK(&hdev->tx_work, hci_tx_work);
1624 INIT_WORK(&hdev->power_on, hci_power_on);
1625 INIT_WORK(&hdev->le_scan, le_scan_work);
1626
David Herrmannb1b813d2012-04-22 14:39:58 +02001627 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1628 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1629 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1630
David Herrmann9be0dab2012-04-22 14:39:57 +02001631 skb_queue_head_init(&hdev->driver_init);
David Herrmannb1b813d2012-04-22 14:39:58 +02001632 skb_queue_head_init(&hdev->rx_q);
1633 skb_queue_head_init(&hdev->cmd_q);
1634 skb_queue_head_init(&hdev->raw_q);
1635
1636 init_waitqueue_head(&hdev->req_wait_q);
1637
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001638 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02001639
David Herrmannb1b813d2012-04-22 14:39:58 +02001640 hci_init_sysfs(hdev);
1641 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02001642
1643 return hdev;
1644}
1645EXPORT_SYMBOL(hci_alloc_dev);
1646
1647/* Free HCI device */
1648void hci_free_dev(struct hci_dev *hdev)
1649{
1650 skb_queue_purge(&hdev->driver_init);
1651
1652 /* will be freed via the device release callback */
1653 put_device(&hdev->dev);
1654}
1655EXPORT_SYMBOL(hci_free_dev);
1656
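/*
 * Typical transport driver usage (a sketch; the hci_foo_* callbacks
 * are hypothetical and error handling is trimmed):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = hci_foo_open;
 *	hdev->close = hci_foo_close;
 *	hdev->send  = hci_foo_send;
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */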
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657/* Register HCI device */
1658int hci_register_dev(struct hci_dev *hdev)
1659{
David Herrmannb1b813d2012-04-22 14:39:58 +02001660 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661
David Herrmann010666a2012-01-07 15:47:07 +01001662 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001663 return -EINVAL;
1664
Mat Martineau08add512011-11-02 16:18:36 -07001665 /* Do not allow HCI_AMP devices to register at index 0,
1666 * so the index can be used as the AMP controller ID.
1667 */
Sasha Levin3df92b32012-05-27 22:36:56 +02001668 switch (hdev->dev_type) {
1669 case HCI_BREDR:
1670 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1671 break;
1672 case HCI_AMP:
1673 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1674 break;
1675 default:
1676 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001677 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001678
Sasha Levin3df92b32012-05-27 22:36:56 +02001679 if (id < 0)
1680 return id;
1681
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682 sprintf(hdev->name, "hci%d", id);
1683 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03001684
1685 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1686
Sasha Levin3df92b32012-05-27 22:36:56 +02001687 write_lock(&hci_dev_list_lock);
1688 list_add(&hdev->list, &hci_dev_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001689 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001690
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001691 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001692 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001693 if (!hdev->workqueue) {
1694 error = -ENOMEM;
1695 goto err;
1696 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001697
David Herrmann33ca9542011-10-08 14:58:49 +02001698 error = hci_add_sysfs(hdev);
1699 if (error < 0)
1700 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001702 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001703 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1704 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001705 if (hdev->rfkill) {
1706 if (rfkill_register(hdev->rfkill) < 0) {
1707 rfkill_destroy(hdev->rfkill);
1708 hdev->rfkill = NULL;
1709 }
1710 }
1711
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001712 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03001713
1714 if (hdev->dev_type != HCI_AMP)
1715 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1716
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001717 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001718
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01001720 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721
1722 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001723
David Herrmann33ca9542011-10-08 14:58:49 +02001724err_wqueue:
1725 destroy_workqueue(hdev->workqueue);
1726err:
Sasha Levin3df92b32012-05-27 22:36:56 +02001727 ida_simple_remove(&hci_index_ida, hdev->id);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001728 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001729 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001730 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001731
David Herrmann33ca9542011-10-08 14:58:49 +02001732 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733}
1734EXPORT_SYMBOL(hci_register_dev);
1735
1736/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02001737void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738{
Sasha Levin3df92b32012-05-27 22:36:56 +02001739 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02001740
Marcel Holtmannc13854c2010-02-08 15:27:07 +01001741 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742
Johan Hovold94324962012-03-15 14:48:41 +01001743 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1744
Sasha Levin3df92b32012-05-27 22:36:56 +02001745 id = hdev->id;
1746
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001747 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001749 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750
1751 hci_dev_do_close(hdev);
1752
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05301753 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02001754 kfree_skb(hdev->reassembly[i]);
1755
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001756 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001757 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001758 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02001759 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001760 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02001761 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001762
Johan Hedberg2e58ef32011-11-08 20:40:15 +02001763 /* mgmt_index_removed should take care of emptying the
1764 * pending list */
1765 BUG_ON(!list_empty(&hdev->mgmt_pending));
1766
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767 hci_notify(hdev, HCI_DEV_UNREG);
1768
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001769 if (hdev->rfkill) {
1770 rfkill_unregister(hdev->rfkill);
1771 rfkill_destroy(hdev->rfkill);
1772 }
1773
David Herrmannce242972011-10-08 14:58:48 +02001774 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08001775
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001776 destroy_workqueue(hdev->workqueue);
1777
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001778 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02001779 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001780 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001781 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001782 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01001783 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001784 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02001785
David Herrmanndc946bd2012-01-07 15:47:24 +01001786 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02001787
1788 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789}
1790EXPORT_SYMBOL(hci_unregister_dev);
1791
1792/* Suspend HCI device */
1793int hci_suspend_dev(struct hci_dev *hdev)
1794{
1795 hci_notify(hdev, HCI_DEV_SUSPEND);
1796 return 0;
1797}
1798EXPORT_SYMBOL(hci_suspend_dev);
1799
1800/* Resume HCI device */
1801int hci_resume_dev(struct hci_dev *hdev)
1802{
1803 hci_notify(hdev, HCI_DEV_RESUME);
1804 return 0;
1805}
1806EXPORT_SYMBOL(hci_resume_dev);
1807
Marcel Holtmann76bca882009-11-18 00:40:39 +01001808/* Receive frame from HCI drivers */
1809int hci_recv_frame(struct sk_buff *skb)
1810{
1811 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1812 if (!hdev || (!test_bit(HCI_UP, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001813 !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001814 kfree_skb(skb);
1815 return -ENXIO;
1816 }
1817
1818 /* Incoming skb */
1819 bt_cb(skb)->incoming = 1;
1820
1821 /* Time stamp */
1822 __net_timestamp(skb);
1823
Marcel Holtmann76bca882009-11-18 00:40:39 +01001824 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001825 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001826
Marcel Holtmann76bca882009-11-18 00:40:39 +01001827 return 0;
1828}
1829EXPORT_SYMBOL(hci_recv_frame);
1830
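/* Accumulate driver-provided bytes into the per-type reassembly skb
 * until a complete HCI packet is built, then hand it to
 * hci_recv_frame(). Returns the number of unconsumed bytes or a
 * negative error. */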
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301831static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001832 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301833{
1834 int len = 0;
1835 int hlen = 0;
1836 int remain = count;
1837 struct sk_buff *skb;
1838 struct bt_skb_cb *scb;
1839
1840 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001841 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301842 return -EILSEQ;
1843
1844 skb = hdev->reassembly[index];
1845
1846 if (!skb) {
1847 switch (type) {
1848 case HCI_ACLDATA_PKT:
1849 len = HCI_MAX_FRAME_SIZE;
1850 hlen = HCI_ACL_HDR_SIZE;
1851 break;
1852 case HCI_EVENT_PKT:
1853 len = HCI_MAX_EVENT_SIZE;
1854 hlen = HCI_EVENT_HDR_SIZE;
1855 break;
1856 case HCI_SCODATA_PKT:
1857 len = HCI_MAX_SCO_SIZE;
1858 hlen = HCI_SCO_HDR_SIZE;
1859 break;
1860 }
1861
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001862 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301863 if (!skb)
1864 return -ENOMEM;
1865
1866 scb = (void *) skb->cb;
1867 scb->expect = hlen;
1868 scb->pkt_type = type;
1869
1870 skb->dev = (void *) hdev;
1871 hdev->reassembly[index] = skb;
1872 }
1873
1874 while (count) {
1875 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03001876 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301877
1878 memcpy(skb_put(skb, len), data, len);
1879
1880 count -= len;
1881 data += len;
1882 scb->expect -= len;
1883 remain = count;
1884
1885 switch (type) {
1886 case HCI_EVENT_PKT:
1887 if (skb->len == HCI_EVENT_HDR_SIZE) {
1888 struct hci_event_hdr *h = hci_event_hdr(skb);
1889 scb->expect = h->plen;
1890
1891 if (skb_tailroom(skb) < scb->expect) {
1892 kfree_skb(skb);
1893 hdev->reassembly[index] = NULL;
1894 return -ENOMEM;
1895 }
1896 }
1897 break;
1898
1899 case HCI_ACLDATA_PKT:
1900 if (skb->len == HCI_ACL_HDR_SIZE) {
1901 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1902 scb->expect = __le16_to_cpu(h->dlen);
1903
1904 if (skb_tailroom(skb) < scb->expect) {
1905 kfree_skb(skb);
1906 hdev->reassembly[index] = NULL;
1907 return -ENOMEM;
1908 }
1909 }
1910 break;
1911
1912 case HCI_SCODATA_PKT:
1913 if (skb->len == HCI_SCO_HDR_SIZE) {
1914 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1915 scb->expect = h->dlen;
1916
1917 if (skb_tailroom(skb) < scb->expect) {
1918 kfree_skb(skb);
1919 hdev->reassembly[index] = NULL;
1920 return -ENOMEM;
1921 }
1922 }
1923 break;
1924 }
1925
1926 if (scb->expect == 0) {
1927 /* Complete frame */
1928
1929 bt_cb(skb)->pkt_type = type;
1930 hci_recv_frame(skb);
1931
1932 hdev->reassembly[index] = NULL;
1933 return remain;
1934 }
1935 }
1936
1937 return remain;
1938}
1939
Marcel Holtmannef222012007-07-11 06:42:04 +02001940int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1941{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301942 int rem = 0;
1943
Marcel Holtmannef222012007-07-11 06:42:04 +02001944 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1945 return -EILSEQ;
1946
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001947 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001948 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301949 if (rem < 0)
1950 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001951
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301952 data += (count - rem);
1953 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001954 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001955
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301956 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001957}
1958EXPORT_SYMBOL(hci_recv_fragment);
1959
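/* Byte-stream transports (e.g. UART) carry the packet type indicator
 * inline, so a single dedicated reassembly slot is used for the whole
 * stream */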
Suraj Sumangala99811512010-07-14 13:02:19 +05301960#define STREAM_REASSEMBLY 0
1961
1962int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1963{
1964 int type;
1965 int rem = 0;
1966
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001967 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301968 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1969
1970 if (!skb) {
1971 struct { char type; } *pkt;
1972
1973 /* Start of the frame */
1974 pkt = data;
1975 type = pkt->type;
1976
1977 data++;
1978 count--;
1979 } else
1980 type = bt_cb(skb)->pkt_type;
1981
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001982 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001983 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301984 if (rem < 0)
1985 return rem;
1986
1987 data += (count - rem);
1988 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001989 }
Suraj Sumangala99811512010-07-14 13:02:19 +05301990
1991 return rem;
1992}
1993EXPORT_SYMBOL(hci_recv_stream_fragment);
1994
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995/* ---- Interface to upper protocols ---- */
1996
Linus Torvalds1da177e2005-04-16 15:20:36 -07001997int hci_register_cb(struct hci_cb *cb)
1998{
1999 BT_DBG("%p name %s", cb, cb->name);
2000
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002001 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002003 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
2005 return 0;
2006}
2007EXPORT_SYMBOL(hci_register_cb);
2008
2009int hci_unregister_cb(struct hci_cb *cb)
2010{
2011 BT_DBG("%p name %s", cb, cb->name);
2012
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002013 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002014 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002015 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016
2017 return 0;
2018}
2019EXPORT_SYMBOL(hci_unregister_cb);
2020
2021static int hci_send_frame(struct sk_buff *skb)
2022{
2023 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2024
2025 if (!hdev) {
2026 kfree_skb(skb);
2027 return -ENODEV;
2028 }
2029
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002030 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002031
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002032 /* Time stamp */
2033 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002035 /* Send copy to monitor */
2036 hci_send_to_monitor(hdev, skb);
2037
2038 if (atomic_read(&hdev->promisc)) {
2039 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002040 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041 }
2042
2043 /* Get rid of skb owner, prior to sending to the driver. */
2044 skb_orphan(skb);
2045
2046 return hdev->send(skb);
2047}
2048
2049/* Send HCI command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002050int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002051{
2052 int len = HCI_COMMAND_HDR_SIZE + plen;
2053 struct hci_command_hdr *hdr;
2054 struct sk_buff *skb;
2055
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002056 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057
2058 skb = bt_skb_alloc(len, GFP_ATOMIC);
2059 if (!skb) {
Marcel Holtmannef222012007-07-11 06:42:04 +02002060 BT_ERR("%s no memory for command", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002061 return -ENOMEM;
2062 }
2063
2064 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002065 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002066 hdr->plen = plen;
2067
2068 if (plen)
2069 memcpy(skb_put(skb, plen), param, plen);
2070
2071 BT_DBG("skb len %d", skb->len);
2072
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002073 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002074 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002075
Johan Hedberga5040ef2011-01-10 13:28:59 +02002076 if (test_bit(HCI_INIT, &hdev->flags))
2077 hdev->init_last_cmd = opcode;
2078
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002080 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
2082 return 0;
2083}
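/*
 * Example (sketch): issuing a Write Local Name command. The cp layout
 * follows struct hci_cp_write_local_name from hci.h.
 *
 *	struct hci_cp_write_local_name cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	memcpy(cp.name, "example", 7);
 *	hci_send_cmd(hdev, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
 */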
Linus Torvalds1da177e2005-04-16 15:20:36 -07002084
2085/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002086void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087{
2088 struct hci_command_hdr *hdr;
2089
2090 if (!hdev->sent_cmd)
2091 return NULL;
2092
2093 hdr = (void *) hdev->sent_cmd->data;
2094
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002095 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096 return NULL;
2097
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002098 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099
2100 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2101}
2102
2103/* Send ACL data */
2104static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2105{
2106 struct hci_acl_hdr *hdr;
2107 int len = skb->len;
2108
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002109 skb_push(skb, HCI_ACL_HDR_SIZE);
2110 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002111 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002112 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2113 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002114}
2115
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002116static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002117 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002119 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002120 struct hci_dev *hdev = conn->hdev;
2121 struct sk_buff *list;
2122
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002123 skb->len = skb_headlen(skb);
2124 skb->data_len = 0;
2125
2126 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002127
2128 switch (hdev->dev_type) {
2129 case HCI_BREDR:
2130 hci_add_acl_hdr(skb, conn->handle, flags);
2131 break;
2132 case HCI_AMP:
2133 hci_add_acl_hdr(skb, chan->handle, flags);
2134 break;
2135 default:
2136 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2137 return;
2138 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002139
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002140 list = skb_shinfo(skb)->frag_list;
2141 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002142 /* Non-fragmented */
2143 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2144
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002145 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002146 } else {
2147 /* Fragmented */
2148 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2149
2150 skb_shinfo(skb)->frag_list = NULL;
2151
2152 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002153 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002155 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002156
2157 flags &= ~ACL_START;
2158 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 do {
2160 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002161
Linus Torvalds1da177e2005-04-16 15:20:36 -07002162 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002163 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002164 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002165
2166 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2167
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002168 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002169 } while (list);
2170
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002171 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002172 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002173}
2174
2175void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2176{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002177 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002178
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002179 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002180
2181 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002182
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002183 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002184
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002185 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002187
2188/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002189void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190{
2191 struct hci_dev *hdev = conn->hdev;
2192 struct hci_sco_hdr hdr;
2193
2194 BT_DBG("%s len %d", hdev->name, skb->len);
2195
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002196 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002197 hdr.dlen = skb->len;
2198
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002199 skb_push(skb, HCI_SCO_HDR_SIZE);
2200 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002201 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002202
2203 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002204 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002205
Linus Torvalds1da177e2005-04-16 15:20:36 -07002206 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002207 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002208}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002209
2210/* ---- HCI TX task (outgoing data) ---- */
2211
2212/* HCI Connection scheduler */
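/* Pick the connection of the given link type with the fewest packets
 * in flight and grant it a fair share (cnt / num) of the free
 * controller buffers as its TX quota */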
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002213static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2214 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002215{
2216 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002217 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002218 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002220 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002221 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002222
2223 rcu_read_lock();
2224
2225 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002226 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002228
2229 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2230 continue;
2231
Linus Torvalds1da177e2005-04-16 15:20:36 -07002232 num++;
2233
2234 if (c->sent < min) {
2235 min = c->sent;
2236 conn = c;
2237 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002238
2239 if (hci_conn_num(hdev, type) == num)
2240 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 }
2242
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002243 rcu_read_unlock();
2244
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002246 int cnt, q;
2247
2248 switch (conn->type) {
2249 case ACL_LINK:
2250 cnt = hdev->acl_cnt;
2251 break;
2252 case SCO_LINK:
2253 case ESCO_LINK:
2254 cnt = hdev->sco_cnt;
2255 break;
2256 case LE_LINK:
2257 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2258 break;
2259 default:
2260 cnt = 0;
2261 BT_ERR("Unknown link type");
2262 }
2263
2264 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002265 *quote = q ? q : 1;
2266 } else
2267 *quote = 0;
2268
2269 BT_DBG("conn %p quote %d", conn, *quote);
2270 return conn;
2271}
2272
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002273static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002274{
2275 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002276 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002277
Ville Tervobae1f5d92011-02-10 22:38:53 -03002278 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002279
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002280 rcu_read_lock();
2281
Linus Torvalds1da177e2005-04-16 15:20:36 -07002282 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002283 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002284 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002285 BT_ERR("%s killing stalled connection %pMR",
2286 hdev->name, &c->dst);
Andrei Emeltchenko7490c6c2012-06-01 16:18:25 +03002287 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002288 }
2289 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002290
2291 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002292}
2293
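/* Like hci_low_sent(), but per channel: among connections of the given
 * type, pick the channel whose queued skb has the highest priority,
 * breaking ties in favour of the least busy connection */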
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002294static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2295 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002296{
2297 struct hci_conn_hash *h = &hdev->conn_hash;
2298 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02002299 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002300 struct hci_conn *conn;
2301 int cnt, q, conn_num = 0;
2302
2303 BT_DBG("%s", hdev->name);
2304
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002305 rcu_read_lock();
2306
2307 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002308 struct hci_chan *tmp;
2309
2310 if (conn->type != type)
2311 continue;
2312
2313 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2314 continue;
2315
2316 conn_num++;
2317
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002318 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002319 struct sk_buff *skb;
2320
2321 if (skb_queue_empty(&tmp->data_q))
2322 continue;
2323
2324 skb = skb_peek(&tmp->data_q);
2325 if (skb->priority < cur_prio)
2326 continue;
2327
2328 if (skb->priority > cur_prio) {
2329 num = 0;
2330 min = ~0;
2331 cur_prio = skb->priority;
2332 }
2333
2334 num++;
2335
2336 if (conn->sent < min) {
2337 min = conn->sent;
2338 chan = tmp;
2339 }
2340 }
2341
2342 if (hci_conn_num(hdev, type) == conn_num)
2343 break;
2344 }
2345
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002346 rcu_read_unlock();
2347
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002348 if (!chan)
2349 return NULL;
2350
2351 switch (chan->conn->type) {
2352 case ACL_LINK:
2353 cnt = hdev->acl_cnt;
2354 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002355 case AMP_LINK:
2356 cnt = hdev->block_cnt;
2357 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002358 case SCO_LINK:
2359 case ESCO_LINK:
2360 cnt = hdev->sco_cnt;
2361 break;
2362 case LE_LINK:
2363 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2364 break;
2365 default:
2366 cnt = 0;
2367 BT_ERR("Unknown link type");
2368 }
2369
2370 q = cnt / num;
2371 *quote = q ? q : 1;
2372 BT_DBG("chan %p quote %d", chan, *quote);
2373 return chan;
2374}
2375
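/* After a TX round, promote the head skb of every channel that was not
 * serviced to HCI_PRIO_MAX - 1 so that low priority traffic is not
 * starved indefinitely */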
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002376static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2377{
2378 struct hci_conn_hash *h = &hdev->conn_hash;
2379 struct hci_conn *conn;
2380 int num = 0;
2381
2382 BT_DBG("%s", hdev->name);
2383
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002384 rcu_read_lock();
2385
2386 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002387 struct hci_chan *chan;
2388
2389 if (conn->type != type)
2390 continue;
2391
2392 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2393 continue;
2394
2395 num++;
2396
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002397 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002398 struct sk_buff *skb;
2399
2400 if (chan->sent) {
2401 chan->sent = 0;
2402 continue;
2403 }
2404
2405 if (skb_queue_empty(&chan->data_q))
2406 continue;
2407
2408 skb = skb_peek(&chan->data_q);
2409 if (skb->priority >= HCI_PRIO_MAX - 1)
2410 continue;
2411
2412 skb->priority = HCI_PRIO_MAX - 1;
2413
2414 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002415 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002416 }
2417
2418 if (hci_conn_num(hdev, type) == num)
2419 break;
2420 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002421
2422 rcu_read_unlock();
2423
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002424}
2425
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002426static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2427{
2428 /* Calculate count of blocks used by this packet */
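 /* e.g. a 1021 byte payload with a 256 byte block_len needs
  * DIV_ROUND_UP(1021, 256) = 4 blocks */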
2429 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2430}
2431
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002432static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002433{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 if (!test_bit(HCI_RAW, &hdev->flags)) {
2435 /* ACL tx timeout must be longer than maximum
2436 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002437 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002438 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002439 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002440 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002441}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002442
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002443static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002444{
2445 unsigned int cnt = hdev->acl_cnt;
2446 struct hci_chan *chan;
2447 struct sk_buff *skb;
2448 int quote;
2449
2450 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002451
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002452 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002453 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002454 u32 priority = (skb_peek(&chan->data_q))->priority;
2455 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002456 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002457 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002458
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002459 /* Stop if priority has changed */
2460 if (skb->priority < priority)
2461 break;
2462
2463 skb = skb_dequeue(&chan->data_q);
2464
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002465 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002466 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002467
Linus Torvalds1da177e2005-04-16 15:20:36 -07002468 hci_send_frame(skb);
2469 hdev->acl_last_tx = jiffies;
2470
2471 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002472 chan->sent++;
2473 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002474 }
2475 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002476
2477 if (cnt != hdev->acl_cnt)
2478 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002479}
2480
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002481static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002482{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002483 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002484 struct hci_chan *chan;
2485 struct sk_buff *skb;
2486 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002487 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002488
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002489 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002490
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002491 BT_DBG("%s", hdev->name);
2492
2493 if (hdev->dev_type == HCI_AMP)
2494 type = AMP_LINK;
2495 else
2496 type = ACL_LINK;
2497
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002498 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002499 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002500 u32 priority = (skb_peek(&chan->data_q))->priority;
2501 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2502 int blocks;
2503
2504 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002505 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002506
2507 /* Stop if priority has changed */
2508 if (skb->priority < priority)
2509 break;
2510
2511 skb = skb_dequeue(&chan->data_q);
2512
2513 blocks = __get_blocks(hdev, skb);
2514 if (blocks > hdev->block_cnt)
2515 return;
2516
2517 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002518 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002519
2520 hci_send_frame(skb);
2521 hdev->acl_last_tx = jiffies;
2522
2523 hdev->block_cnt -= blocks;
2524 quote -= blocks;
2525
2526 chan->sent += blocks;
2527 chan->conn->sent += blocks;
2528 }
2529 }
2530
2531 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002532 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002533}
2534
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002535static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002536{
2537 BT_DBG("%s", hdev->name);
2538
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002539 /* No ACL link over BR/EDR controller */
2540 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2541 return;
2542
2543 /* No AMP link over AMP controller */
2544 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002545 return;
2546
2547 switch (hdev->flow_ctl_mode) {
2548 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2549 hci_sched_acl_pkt(hdev);
2550 break;
2551
2552 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2553 hci_sched_acl_blk(hdev);
2554 break;
2555 }
2556}
2557
Linus Torvalds1da177e2005-04-16 15:20:36 -07002558/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002559static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002560{
2561 struct hci_conn *conn;
2562 struct sk_buff *skb;
2563 int quote;
2564
2565 BT_DBG("%s", hdev->name);
2566
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002567 if (!hci_conn_num(hdev, SCO_LINK))
2568 return;
2569
Linus Torvalds1da177e2005-04-16 15:20:36 -07002570 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2571 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2572 BT_DBG("skb %p len %d", skb, skb->len);
2573 hci_send_frame(skb);
2574
2575 conn->sent++;
2576 if (conn->sent == ~0)
2577 conn->sent = 0;
2578 }
2579 }
2580}
2581
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002582static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002583{
2584 struct hci_conn *conn;
2585 struct sk_buff *skb;
2586 int quote;
2587
2588 BT_DBG("%s", hdev->name);
2589
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002590 if (!hci_conn_num(hdev, ESCO_LINK))
2591 return;
2592
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002593 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2594 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002595 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2596 BT_DBG("skb %p len %d", skb, skb->len);
2597 hci_send_frame(skb);
2598
2599 conn->sent++;
2600 if (conn->sent == ~0)
2601 conn->sent = 0;
2602 }
2603 }
2604}
2605
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002606static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002607{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002608 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002609 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002610 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002611
2612 BT_DBG("%s", hdev->name);
2613
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002614 if (!hci_conn_num(hdev, LE_LINK))
2615 return;
2616
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002617 if (!test_bit(HCI_RAW, &hdev->flags)) {
2618 /* LE tx timeout must be longer than maximum
2619 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03002620 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002621 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002622 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002623 }
2624
2625 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002626 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002627 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002628 u32 priority = (skb_peek(&chan->data_q))->priority;
2629 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002630 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002631 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002632
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002633 /* Stop if priority has changed */
2634 if (skb->priority < priority)
2635 break;
2636
2637 skb = skb_dequeue(&chan->data_q);
2638
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002639 hci_send_frame(skb);
2640 hdev->le_last_tx = jiffies;
2641
2642 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002643 chan->sent++;
2644 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002645 }
2646 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002647
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002648 if (hdev->le_pkts)
2649 hdev->le_cnt = cnt;
2650 else
2651 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002652
2653 if (cnt != tmp)
2654 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002655}
2656
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002657static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002658{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002659 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660 struct sk_buff *skb;
2661
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002662 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002663 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002664
2665 /* Schedule queues and send stuff to HCI driver */
2666
2667 hci_sched_acl(hdev);
2668
2669 hci_sched_sco(hdev);
2670
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002671 hci_sched_esco(hdev);
2672
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002673 hci_sched_le(hdev);
2674
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675 /* Send next queued raw (unknown type) packet */
2676 while ((skb = skb_dequeue(&hdev->raw_q)))
2677 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678}
2679
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002680/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681
2682/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002683static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684{
2685 struct hci_acl_hdr *hdr = (void *) skb->data;
2686 struct hci_conn *conn;
2687 __u16 handle, flags;
2688
2689 skb_pull(skb, HCI_ACL_HDR_SIZE);
2690
2691 handle = __le16_to_cpu(hdr->handle);
2692 flags = hci_flags(handle);
2693 handle = hci_handle(handle);
2694
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002695 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002696 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697
2698 hdev->stat.acl_rx++;
2699
2700 hci_dev_lock(hdev);
2701 conn = hci_conn_hash_lookup_handle(hdev, handle);
2702 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002703
Linus Torvalds1da177e2005-04-16 15:20:36 -07002704 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08002705 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002706
Johan Hedberg671267b2012-05-12 16:11:50 -03002707 hci_dev_lock(hdev);
2708 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2709 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2710 mgmt_device_connected(hdev, &conn->dst, conn->type,
2711 conn->dst_type, 0, NULL, 0,
2712 conn->dev_class);
2713 hci_dev_unlock(hdev);
2714
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002716 l2cap_recv_acldata(conn, skb, flags);
2717 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002718 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002719 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002720 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002721 }
2722
2723 kfree_skb(skb);
2724}
2725
2726/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002727static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002728{
2729 struct hci_sco_hdr *hdr = (void *) skb->data;
2730 struct hci_conn *conn;
2731 __u16 handle;
2732
2733 skb_pull(skb, HCI_SCO_HDR_SIZE);
2734
2735 handle = __le16_to_cpu(hdr->handle);
2736
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002737 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002738
2739 hdev->stat.sco_rx++;
2740
2741 hci_dev_lock(hdev);
2742 conn = hci_conn_hash_lookup_handle(hdev, handle);
2743 hci_dev_unlock(hdev);
2744
2745 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002747 sco_recv_scodata(conn, skb);
2748 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002750 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002751 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 }
2753
2754 kfree_skb(skb);
2755}
2756
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002757static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002759 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002760 struct sk_buff *skb;
2761
2762 BT_DBG("%s", hdev->name);
2763
Linus Torvalds1da177e2005-04-16 15:20:36 -07002764 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002765 /* Send copy to monitor */
2766 hci_send_to_monitor(hdev, skb);
2767
Linus Torvalds1da177e2005-04-16 15:20:36 -07002768 if (atomic_read(&hdev->promisc)) {
2769 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002770 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 }
2772
2773 if (test_bit(HCI_RAW, &hdev->flags)) {
2774 kfree_skb(skb);
2775 continue;
2776 }
2777
2778 if (test_bit(HCI_INIT, &hdev->flags)) {
2779 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002780 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002781 case HCI_ACLDATA_PKT:
2782 case HCI_SCODATA_PKT:
2783 kfree_skb(skb);
2784 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002785 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002786 }
2787
2788 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002789 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002790 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002791 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002792 hci_event_packet(hdev, skb);
2793 break;
2794
2795 case HCI_ACLDATA_PKT:
2796 BT_DBG("%s ACL data packet", hdev->name);
2797 hci_acldata_packet(hdev, skb);
2798 break;
2799
2800 case HCI_SCODATA_PKT:
2801 BT_DBG("%s SCO data packet", hdev->name);
2802 hci_scodata_packet(hdev, skb);
2803 break;
2804
2805 default:
2806 kfree_skb(skb);
2807 break;
2808 }
2809 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002810}
2811
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002812static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002814 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002815 struct sk_buff *skb;
2816
Andrei Emeltchenko21047862012-07-10 15:27:47 +03002817 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2818 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002821 if (atomic_read(&hdev->cmd_cnt)) {
2822 skb = skb_dequeue(&hdev->cmd_q);
2823 if (!skb)
2824 return;
2825
Wei Yongjun7585b972009-02-25 18:29:52 +08002826 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002828 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2829 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 atomic_dec(&hdev->cmd_cnt);
2831 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002832 if (test_bit(HCI_RESET, &hdev->flags))
2833 del_timer(&hdev->cmd_timer);
2834 else
2835 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002836 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 } else {
2838 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002839 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 }
2841 }
2842}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002843
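/* Start a general inquiry. The length argument is the HCI
 * Inquiry_Length parameter, in units of 1.28 seconds. */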
2844int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2845{
2846 /* General inquiry access code (GIAC) */
2847 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2848 struct hci_cp_inquiry cp;
2849
2850 BT_DBG("%s", hdev->name);
2851
2852 if (test_bit(HCI_INQUIRY, &hdev->flags))
2853 return -EINPROGRESS;
2854
Johan Hedberg46632622012-01-02 16:06:08 +02002855 inquiry_cache_flush(hdev);
2856
Andre Guedes2519a1f2011-11-07 11:45:24 -03002857 memset(&cp, 0, sizeof(cp));
2858 memcpy(&cp.lap, lap, sizeof(cp.lap));
2859 cp.length = length;
2860
2861 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2862}
Andre Guedes023d50492011-11-04 14:16:52 -03002863
2864int hci_cancel_inquiry(struct hci_dev *hdev)
2865{
2866 BT_DBG("%s", hdev->name);
2867
2868 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002869 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002870
2871 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2872}
Andre Guedes31f79562012-04-24 21:02:53 -03002873
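/* Map an exported (mgmt interface) LE address type to the internal
 * ADDR_LE_DEV_* value, defaulting to random for unknown types */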
2874u8 bdaddr_to_le(u8 bdaddr_type)
2875{
2876 switch (bdaddr_type) {
2877 case BDADDR_LE_PUBLIC:
2878 return ADDR_LE_DEV_PUBLIC;
2879
2880 default:
2881 /* Fallback to LE Random address type */
2882 return ADDR_LE_DEV_RANDOM;
2883 }
2884}