blob: 515d0c394f35e29820801444e3a9d4510e549f2d [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
27
#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

/* Work handlers for the RX, command and TX paths (defined later). */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */
52
/* Forward a device event (e.g. up/down) to the HCI socket layer. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Called when an HCI command completes.
 *
 * @hdev:   device the command completed on
 * @cmd:    opcode of the completed command
 * @result: status byte reported by the controller
 *
 * Outside the init phase this simply completes any pending synchronous
 * request and wakes the waiter in __hci_request().
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only act on a spurious reset-complete; ignore any other
		 * mismatched completion (including a genuine reset echo).
		 */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
97
/* Abort a pending synchronous request with @err (e.g. on device removal)
 * and wake the waiter blocked in __hci_request().
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
108
/* Execute request and wait for completion.
 *
 * @req:     callback that queues the HCI command(s) for the request
 * @opt:     opaque argument passed through to @req
 * @timeout: maximum wait, in jiffies
 *
 * Returns 0 on success, a negative errno translated from the HCI status
 * on completion, -EINTR if interrupted by a signal, or -ETIMEDOUT.
 * Callers serialize through hci_req_lock() (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue on the wait queue before issuing the request so the
	 * completion wakeup cannot be missed.
	 */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		/* Still HCI_REQ_PEND: nothing completed within the timeout */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
152
/* Run a synchronous request under the request lock.
 * Returns -ENETDOWN if the device is not up, otherwise the result of
 * __hci_request().
 */
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
169
/* Request handler: send HCI_Reset to the controller. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
178
/* Queue the basic init commands for a BR/EDR controller. */
static void bredr_init(struct hci_dev *hdev)
{
	/* BR/EDR controllers use packet based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
189
/* Queue the basic init commands for an AMP controller. */
static void amp_init(struct hci_dev *hdev)
{
	/* AMP controllers use block based flow control */
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
203
/* Request handler: initialize the controller after power-on.
 * Flushes any driver-supplied init commands into the command queue
 * first, optionally issues a reset, then runs the device-type specific
 * init sequence.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
240
/* Request handler: set the inquiry/page scan mode (@opt is the scan
 * enable bitmask).
 */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
250
/* Request handler: enable/disable authentication (@opt is the new
 * auth enable value).
 */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
260
/* Request handler: enable/disable link-level encryption (@opt is the
 * new encryption mode).
 */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
270
/* Request handler: set the default link policy (@opt is the policy
 * bitmask, sent little-endian on the wire).
 */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
280
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900281/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 * Device is held on return. */
283struct hci_dev *hci_dev_get(int index)
284{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200285 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700286
287 BT_DBG("%d", index);
288
289 if (index < 0)
290 return NULL;
291
292 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200293 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700294 if (d->id == index) {
295 hdev = hci_dev_hold(d);
296 break;
297 }
298 }
299 read_unlock(&hci_dev_list_lock);
300 return hdev;
301}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302
303/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200304
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200305bool hci_discovery_active(struct hci_dev *hdev)
306{
307 struct discovery_state *discov = &hdev->discovery;
308
Andre Guedes6fbe1952012-02-03 17:47:58 -0300309 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300310 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300311 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200312 return true;
313
Andre Guedes6fbe1952012-02-03 17:47:58 -0300314 default:
315 return false;
316 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200317}
318
/* Move the discovery state machine to @state, emitting mgmt
 * "discovering" events on the transitions user space observes.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	/* No-op if already in the requested state */
	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* Don't report "stopped" if discovery never got past the
		 * starting phase (no "started" event was sent).
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
344
/* Free every cached inquiry entry and reset the per-state lists.
 * Callers in this file hold hdev->lock (hci_dev_lock) around this.
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	/* Every entry is linked on "all"; the unknown/resolve lists only
	 * carry secondary links, so freeing via "all" covers everything.
	 */
	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
358
/* Look up a cached inquiry entry by Bluetooth address.
 * Returns the entry or NULL if the address is not in the cache.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
374
/* Look up an entry on the "unknown name" list by Bluetooth address.
 * Returns the entry or NULL if no unknown-name entry matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
390
/* Look up an entry on the name-resolve list.
 * With a concrete @bdaddr, matches by address; with BDADDR_ANY, matches
 * the first entry whose name_state equals @state.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
409
/* Re-insert @ie into the resolve list so it stays ordered by
 * descending |RSSI| (strongest signal first), skipping past entries
 * whose name resolution is already pending.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last position that still outranks @ie */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
428
/* Add or refresh a cache entry from an inquiry result.
 *
 * @data:       inquiry data received from the controller
 * @name_known: whether the remote name is already known to us
 * @ssp:        out-param, set to true when the remote indicates SSP
 *
 * Returns true when the result is complete enough to report (name
 * known or not needed), false when the name is still unknown or the
 * entry could not be allocated.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* A previous result already showed SSP support */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* Keep the resolve list ordered when the RSSI changes */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: drop from the unknown/resolve lists */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
484
/* Copy up to @num cached entries into @buf as struct inquiry_info
 * records. Returns the number of entries copied. Does not sleep;
 * callers hold hdev->lock.
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
512
/* Request handler: start an inquiry using the parameters in @opt
 * (a struct hci_inquiry_req *), unless one is already in progress.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
529
/* HCIINQUIRY ioctl: run an inquiry and copy the cached results back
 * to user space.
 *
 * @arg: userspace pointer to a struct hci_inquiry_req, followed on
 *       output by the inquiry_info records.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV, -ENOMEM,
 * or an error from hci_request()).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Reuse fresh cache contents unless a flush was requested */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28s units; budget ~2s of jiffies per unit */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Write back the request header (with the real num_rsp), then
	 * the result records.
	 */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
596
597/* ---- HCI ioctl helpers ---- */
598
/* Power on an HCI device by index: open the driver, run the HCI init
 * sequence (unless the device is raw), and announce HCI_DEV_UP.
 *
 * Returns 0 on success, or -ENODEV/-ERFKILL/-EALREADY/-EIO or an init
 * error; on init failure the device is fully torn down again.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered: refuse to bring it up */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip HCI initialization entirely */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
686
/* Power down an HCI device: cancel pending work, flush all queues,
 * optionally reset the controller, close the driver and notify mgmt.
 * The teardown order matters — works are flushed before queues are
 * purged, and the command timer is stopped before freeing sent_cmd.
 * Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
776
/* ioctl helper: power down the HCI device with index @dev, canceling
 * a pending automatic power-off first. Returns 0 or -ENODEV.
 */
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}
794
795int hci_dev_reset(__u16 dev)
796{
797 struct hci_dev *hdev;
798 int ret = 0;
799
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200800 hdev = hci_dev_get(dev);
801 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700802 return -ENODEV;
803
804 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700805
806 if (!test_bit(HCI_UP, &hdev->flags))
807 goto done;
808
809 /* Drop queues */
810 skb_queue_purge(&hdev->rx_q);
811 skb_queue_purge(&hdev->cmd_q);
812
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300813 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700814 inquiry_cache_flush(hdev);
815 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300816 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700817
818 if (hdev->flush)
819 hdev->flush(hdev);
820
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900821 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -0300822 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700823
824 if (!test_bit(HCI_RAW, &hdev->flags))
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300825 ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700826
827done:
Linus Torvalds1da177e2005-04-16 15:20:36 -0700828 hci_req_unlock(hdev);
829 hci_dev_put(hdev);
830 return ret;
831}
832
833int hci_dev_reset_stat(__u16 dev)
834{
835 struct hci_dev *hdev;
836 int ret = 0;
837
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200838 hdev = hci_dev_get(dev);
839 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700840 return -ENODEV;
841
842 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
843
844 hci_dev_put(hdev);
845
846 return ret;
847}
848
849int hci_dev_cmd(unsigned int cmd, void __user *arg)
850{
851 struct hci_dev *hdev;
852 struct hci_dev_req dr;
853 int err = 0;
854
855 if (copy_from_user(&dr, arg, sizeof(dr)))
856 return -EFAULT;
857
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200858 hdev = hci_dev_get(dr.dev_id);
859 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700860 return -ENODEV;
861
862 switch (cmd) {
863 case HCISETAUTH:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200864 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300865 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866 break;
867
868 case HCISETENCRYPT:
869 if (!lmp_encrypt_capable(hdev)) {
870 err = -EOPNOTSUPP;
871 break;
872 }
873
874 if (!test_bit(HCI_AUTH, &hdev->flags)) {
875 /* Auth must be enabled first */
Marcel Holtmann04837f62006-07-03 10:02:33 +0200876 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300877 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878 if (err)
879 break;
880 }
881
Marcel Holtmann04837f62006-07-03 10:02:33 +0200882 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300883 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700884 break;
885
886 case HCISETSCAN:
Marcel Holtmann04837f62006-07-03 10:02:33 +0200887 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300888 HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700889 break;
890
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200891 case HCISETLINKPOL:
892 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +0300893 HCI_INIT_TIMEOUT);
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200894 break;
895
896 case HCISETLINKMODE:
897 hdev->link_mode = ((__u16) dr.dev_opt) &
898 (HCI_LM_MASTER | HCI_LM_ACCEPT);
899 break;
900
Linus Torvalds1da177e2005-04-16 15:20:36 -0700901 case HCISETPTYPE:
902 hdev->pkt_type = (__u16) dr.dev_opt;
903 break;
904
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200906 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
907 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700908 break;
909
910 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200911 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
912 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913 break;
914
915 default:
916 err = -EINVAL;
917 break;
918 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +0200919
Linus Torvalds1da177e2005-04-16 15:20:36 -0700920 hci_dev_put(hdev);
921 return err;
922}
923
924int hci_get_dev_list(void __user *arg)
925{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200926 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700927 struct hci_dev_list_req *dl;
928 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700929 int n = 0, size, err;
930 __u16 dev_num;
931
932 if (get_user(dev_num, (__u16 __user *) arg))
933 return -EFAULT;
934
935 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
936 return -EINVAL;
937
938 size = sizeof(*dl) + dev_num * sizeof(*dr);
939
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200940 dl = kzalloc(size, GFP_KERNEL);
941 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700942 return -ENOMEM;
943
944 dr = dl->dev_req;
945
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -0200946 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200947 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200948 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +0200949 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +0200950
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200951 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
952 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +0200953
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954 (dr + n)->dev_id = hdev->id;
955 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +0200956
Linus Torvalds1da177e2005-04-16 15:20:36 -0700957 if (++n >= dev_num)
958 break;
959 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -0200960 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700961
962 dl->dev_num = n;
963 size = sizeof(*dl) + n * sizeof(*dr);
964
965 err = copy_to_user(arg, dl, size);
966 kfree(dl);
967
968 return err ? -EFAULT : 0;
969}
970
971int hci_get_dev_info(void __user *arg)
972{
973 struct hci_dev *hdev;
974 struct hci_dev_info di;
975 int err = 0;
976
977 if (copy_from_user(&di, arg, sizeof(di)))
978 return -EFAULT;
979
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200980 hdev = hci_dev_get(di.dev_id);
981 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700982 return -ENODEV;
983
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200984 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +0200985 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +0200986
Johan Hedberga8b2d5c2012-01-08 23:11:15 +0200987 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
988 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +0200989
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990 strcpy(di.name, hdev->name);
991 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +0100992 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700993 di.flags = hdev->flags;
994 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +0300995 if (lmp_bredr_capable(hdev)) {
996 di.acl_mtu = hdev->acl_mtu;
997 di.acl_pkts = hdev->acl_pkts;
998 di.sco_mtu = hdev->sco_mtu;
999 di.sco_pkts = hdev->sco_pkts;
1000 } else {
1001 di.acl_mtu = hdev->le_mtu;
1002 di.acl_pkts = hdev->le_pkts;
1003 di.sco_mtu = 0;
1004 di.sco_pkts = 0;
1005 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001006 di.link_policy = hdev->link_policy;
1007 di.link_mode = hdev->link_mode;
1008
1009 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1010 memcpy(&di.features, &hdev->features, sizeof(di.features));
1011
1012 if (copy_to_user(arg, &di, sizeof(di)))
1013 err = -EFAULT;
1014
1015 hci_dev_put(hdev);
1016
1017 return err;
1018}
1019
1020/* ---- Interface to HCI drivers ---- */
1021
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001022static int hci_rfkill_set_block(void *data, bool blocked)
1023{
1024 struct hci_dev *hdev = data;
1025
1026 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1027
1028 if (!blocked)
1029 return 0;
1030
1031 hci_dev_do_close(hdev);
1032
1033 return 0;
1034}
1035
1036static const struct rfkill_ops hci_rfkill_ops = {
1037 .set_block = hci_rfkill_set_block,
1038};
1039
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001040static void hci_power_on(struct work_struct *work)
1041{
1042 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1043
1044 BT_DBG("%s", hdev->name);
1045
1046 if (hci_dev_open(hdev->id) < 0)
1047 return;
1048
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001049 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Andrei Emeltchenko9345d402012-06-15 10:36:42 +03001050 schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001051
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001052 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001053 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001054}
1055
1056static void hci_power_off(struct work_struct *work)
1057{
Johan Hedberg32435532011-11-07 22:16:04 +02001058 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001059 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001060
1061 BT_DBG("%s", hdev->name);
1062
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001063 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001064}
1065
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001066static void hci_discov_off(struct work_struct *work)
1067{
1068 struct hci_dev *hdev;
1069 u8 scan = SCAN_PAGE;
1070
1071 hdev = container_of(work, struct hci_dev, discov_off.work);
1072
1073 BT_DBG("%s", hdev->name);
1074
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001075 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001076
1077 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1078
1079 hdev->discov_timeout = 0;
1080
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001081 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001082}
1083
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001084int hci_uuids_clear(struct hci_dev *hdev)
1085{
1086 struct list_head *p, *n;
1087
1088 list_for_each_safe(p, n, &hdev->uuids) {
1089 struct bt_uuid *uuid;
1090
1091 uuid = list_entry(p, struct bt_uuid, list);
1092
1093 list_del(p);
1094 kfree(uuid);
1095 }
1096
1097 return 0;
1098}
1099
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001100int hci_link_keys_clear(struct hci_dev *hdev)
1101{
1102 struct list_head *p, *n;
1103
1104 list_for_each_safe(p, n, &hdev->link_keys) {
1105 struct link_key *key;
1106
1107 key = list_entry(p, struct link_key, list);
1108
1109 list_del(p);
1110 kfree(key);
1111 }
1112
1113 return 0;
1114}
1115
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001116int hci_smp_ltks_clear(struct hci_dev *hdev)
1117{
1118 struct smp_ltk *k, *tmp;
1119
1120 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1121 list_del(&k->list);
1122 kfree(k);
1123 }
1124
1125 return 0;
1126}
1127
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001128struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1129{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001130 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001131
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001132 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001133 if (bacmp(bdaddr, &k->bdaddr) == 0)
1134 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001135
1136 return NULL;
1137}
1138
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301139static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001140 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001141{
1142 /* Legacy key */
1143 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301144 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001145
1146 /* Debug keys are insecure so don't store them persistently */
1147 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301148 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001149
1150 /* Changed combination key and there's no previous one */
1151 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301152 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001153
1154 /* Security mode 3 case */
1155 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301156 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001157
1158 /* Neither local nor remote side had no-bonding as requirement */
1159 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301160 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001161
1162 /* Local side had dedicated bonding as requirement */
1163 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301164 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001165
1166 /* Remote side had dedicated bonding as requirement */
1167 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301168 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001169
1170 /* If none of the above criteria match, then don't store the key
1171 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301172 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001173}
1174
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001175struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001176{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001177 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001178
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001179 list_for_each_entry(k, &hdev->long_term_keys, list) {
1180 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001181 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001182 continue;
1183
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001184 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001185 }
1186
1187 return NULL;
1188}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001189
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001190struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001191 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001192{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001193 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001194
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001195 list_for_each_entry(k, &hdev->long_term_keys, list)
1196 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001197 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001198 return k;
1199
1200 return NULL;
1201}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001202
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001203int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001204 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001205{
1206 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301207 u8 old_key_type;
1208 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001209
1210 old_key = hci_find_link_key(hdev, bdaddr);
1211 if (old_key) {
1212 old_key_type = old_key->type;
1213 key = old_key;
1214 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001215 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001216 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1217 if (!key)
1218 return -ENOMEM;
1219 list_add(&key->list, &hdev->link_keys);
1220 }
1221
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001222 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001223
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001224 /* Some buggy controller combinations generate a changed
1225 * combination key for legacy pairing even when there's no
1226 * previous key */
1227 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001228 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001229 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001230 if (conn)
1231 conn->key_type = type;
1232 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001233
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001234 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001235 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001236 key->pin_len = pin_len;
1237
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001238 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001239 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001240 else
1241 key->type = type;
1242
Johan Hedberg4df378a2011-04-28 11:29:03 -07001243 if (!new_key)
1244 return 0;
1245
1246 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1247
Johan Hedberg744cf192011-11-08 20:40:14 +02001248 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001249
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301250 if (conn)
1251 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001252
1253 return 0;
1254}
1255
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001256int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001257 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001258 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001259{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001260 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001261
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001262 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1263 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001264
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001265 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1266 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001267 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001268 else {
1269 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001270 if (!key)
1271 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001272 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001273 }
1274
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001275 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001276 key->bdaddr_type = addr_type;
1277 memcpy(key->val, tk, sizeof(key->val));
1278 key->authenticated = authenticated;
1279 key->ediv = ediv;
1280 key->enc_size = enc_size;
1281 key->type = type;
1282 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001283
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001284 if (!new_key)
1285 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001286
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001287 if (type & HCI_SMP_LTK)
1288 mgmt_new_ltk(hdev, key, 1);
1289
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001290 return 0;
1291}
1292
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001293int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1294{
1295 struct link_key *key;
1296
1297 key = hci_find_link_key(hdev, bdaddr);
1298 if (!key)
1299 return -ENOENT;
1300
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001301 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001302
1303 list_del(&key->list);
1304 kfree(key);
1305
1306 return 0;
1307}
1308
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001309int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1310{
1311 struct smp_ltk *k, *tmp;
1312
1313 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1314 if (bacmp(bdaddr, &k->bdaddr))
1315 continue;
1316
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001317 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001318
1319 list_del(&k->list);
1320 kfree(k);
1321 }
1322
1323 return 0;
1324}
1325
Ville Tervo6bd32322011-02-16 16:32:41 +02001326/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001327static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001328{
1329 struct hci_dev *hdev = (void *) arg;
1330
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001331 if (hdev->sent_cmd) {
1332 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1333 u16 opcode = __le16_to_cpu(sent->opcode);
1334
1335 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1336 } else {
1337 BT_ERR("%s command tx timeout", hdev->name);
1338 }
1339
Ville Tervo6bd32322011-02-16 16:32:41 +02001340 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001341 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001342}
1343
Szymon Janc2763eda2011-03-22 13:12:22 +01001344struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001345 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001346{
1347 struct oob_data *data;
1348
1349 list_for_each_entry(data, &hdev->remote_oob_data, list)
1350 if (bacmp(bdaddr, &data->bdaddr) == 0)
1351 return data;
1352
1353 return NULL;
1354}
1355
1356int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1357{
1358 struct oob_data *data;
1359
1360 data = hci_find_remote_oob_data(hdev, bdaddr);
1361 if (!data)
1362 return -ENOENT;
1363
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001364 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001365
1366 list_del(&data->list);
1367 kfree(data);
1368
1369 return 0;
1370}
1371
1372int hci_remote_oob_data_clear(struct hci_dev *hdev)
1373{
1374 struct oob_data *data, *n;
1375
1376 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1377 list_del(&data->list);
1378 kfree(data);
1379 }
1380
1381 return 0;
1382}
1383
1384int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001385 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001386{
1387 struct oob_data *data;
1388
1389 data = hci_find_remote_oob_data(hdev, bdaddr);
1390
1391 if (!data) {
1392 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1393 if (!data)
1394 return -ENOMEM;
1395
1396 bacpy(&data->bdaddr, bdaddr);
1397 list_add(&data->list, &hdev->remote_oob_data);
1398 }
1399
1400 memcpy(data->hash, hash, sizeof(data->hash));
1401 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1402
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001403 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001404
1405 return 0;
1406}
1407
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001408struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001409{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001410 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001411
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001412 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001413 if (bacmp(bdaddr, &b->bdaddr) == 0)
1414 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001415
1416 return NULL;
1417}
1418
1419int hci_blacklist_clear(struct hci_dev *hdev)
1420{
1421 struct list_head *p, *n;
1422
1423 list_for_each_safe(p, n, &hdev->blacklist) {
1424 struct bdaddr_list *b;
1425
1426 b = list_entry(p, struct bdaddr_list, list);
1427
1428 list_del(p);
1429 kfree(b);
1430 }
1431
1432 return 0;
1433}
1434
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001435int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001436{
1437 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001438
1439 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1440 return -EBADF;
1441
Antti Julku5e762442011-08-25 16:48:02 +03001442 if (hci_blacklist_lookup(hdev, bdaddr))
1443 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001444
1445 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001446 if (!entry)
1447 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001448
1449 bacpy(&entry->bdaddr, bdaddr);
1450
1451 list_add(&entry->list, &hdev->blacklist);
1452
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001453 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001454}
1455
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001456int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001457{
1458 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001459
Szymon Janc1ec918c2011-11-16 09:32:21 +01001460 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001461 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001462
1463 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001464 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001465 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001466
1467 list_del(&entry->list);
1468 kfree(entry);
1469
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001470 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001471}
1472
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001473static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1474{
1475 struct le_scan_params *param = (struct le_scan_params *) opt;
1476 struct hci_cp_le_set_scan_param cp;
1477
1478 memset(&cp, 0, sizeof(cp));
1479 cp.type = param->type;
1480 cp.interval = cpu_to_le16(param->interval);
1481 cp.window = cpu_to_le16(param->window);
1482
1483 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1484}
1485
1486static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1487{
1488 struct hci_cp_le_set_scan_enable cp;
1489
1490 memset(&cp, 0, sizeof(cp));
1491 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001492 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001493
1494 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1495}
1496
1497static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001498 u16 window, int timeout)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001499{
1500 long timeo = msecs_to_jiffies(3000);
1501 struct le_scan_params param;
1502 int err;
1503
1504 BT_DBG("%s", hdev->name);
1505
1506 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1507 return -EINPROGRESS;
1508
1509 param.type = type;
1510 param.interval = interval;
1511 param.window = window;
1512
1513 hci_req_lock(hdev);
1514
1515 err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001516 timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001517 if (!err)
1518 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1519
1520 hci_req_unlock(hdev);
1521
1522 if (err < 0)
1523 return err;
1524
1525 schedule_delayed_work(&hdev->le_scan_disable,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001526 msecs_to_jiffies(timeout));
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001527
1528 return 0;
1529}
1530
Andre Guedes7dbfac12012-03-15 16:52:07 -03001531int hci_cancel_le_scan(struct hci_dev *hdev)
1532{
1533 BT_DBG("%s", hdev->name);
1534
1535 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1536 return -EALREADY;
1537
1538 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1539 struct hci_cp_le_set_scan_enable cp;
1540
1541 /* Send HCI command to disable LE Scan */
1542 memset(&cp, 0, sizeof(cp));
1543 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1544 }
1545
1546 return 0;
1547}
1548
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001549static void le_scan_disable_work(struct work_struct *work)
1550{
1551 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001552 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001553 struct hci_cp_le_set_scan_enable cp;
1554
1555 BT_DBG("%s", hdev->name);
1556
1557 memset(&cp, 0, sizeof(cp));
1558
1559 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1560}
1561
Andre Guedes28b75a82012-02-03 17:48:00 -03001562static void le_scan_work(struct work_struct *work)
1563{
1564 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1565 struct le_scan_params *param = &hdev->le_scan_params;
1566
1567 BT_DBG("%s", hdev->name);
1568
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001569 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1570 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001571}
1572
1573int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001574 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001575{
1576 struct le_scan_params *param = &hdev->le_scan_params;
1577
1578 BT_DBG("%s", hdev->name);
1579
Johan Hedbergf1550472012-10-24 21:12:03 +03001580 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1581 return -ENOTSUPP;
1582
Andre Guedes28b75a82012-02-03 17:48:00 -03001583 if (work_busy(&hdev->le_scan))
1584 return -EINPROGRESS;
1585
1586 param->type = type;
1587 param->interval = interval;
1588 param->window = window;
1589 param->timeout = timeout;
1590
1591 queue_work(system_long_wq, &hdev->le_scan);
1592
1593 return 0;
1594}
1595
/* Alloc HCI device.
 *
 * Allocates a zeroed struct hci_dev and initializes all generic state
 * (default packet types, locks, lists, work items, skb queues, timers,
 * sysfs and discovery state). The caller owns the returned device and
 * releases it with hci_free_dev(); returns NULL on allocation failure.
 */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline BR/EDR defaults before the controller is queried */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03;	/* No Input No Output */

	/* Sniff intervals in slots (0.625 ms units) */
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	/* RX/TX/command processing and power management work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Detects commands the controller never answered */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
1649
/* Free HCI device.
 *
 * Drops any frames a driver queued before registration and releases the
 * caller's reference; the struct itself is freed by the device release
 * callback once the last reference is gone.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1659
/* Register HCI device.
 *
 * Assigns an index via the IDA, links the device into the global list,
 * creates its per-device workqueue and sysfs entries, optionally hooks
 * up rfkill, and schedules the initial power-on. Returns the assigned
 * id (>= 0) on success or a negative errno; on failure all partially
 * acquired resources are unwound via the goto labels below.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must supply at least open() and close() callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	/* Single-threaded, high-priority queue for this device's work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is best-effort: registration failure is not fatal */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1738
/* Unregister HCI device.
 *
 * Reverses hci_register_dev(): unlinks the device, shuts it down, frees
 * reassembly buffers, notifies mgmt (unless still in init/setup), tears
 * down rfkill, sysfs and the workqueue, clears persistent key/OOB lists,
 * drops the registration reference and finally releases the index.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Prevent new work from being scheduled against this device */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Save id now; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Drop all persistent security and pairing state */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1794
/* Suspend HCI device.
 *
 * Only notifies registered listeners of the suspend event; always
 * succeeds.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1802
/* Resume HCI device.
 *
 * Only notifies registered listeners of the resume event; always
 * succeeds.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1810
Marcel Holtmann76bca882009-11-18 00:40:39 +01001811/* Receive frame from HCI drivers */
1812int hci_recv_frame(struct sk_buff *skb)
1813{
1814 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1815 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001816 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001817 kfree_skb(skb);
1818 return -ENXIO;
1819 }
1820
1821 /* Incomming skb */
1822 bt_cb(skb)->incoming = 1;
1823
1824 /* Time stamp */
1825 __net_timestamp(skb);
1826
Marcel Holtmann76bca882009-11-18 00:40:39 +01001827 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001828 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001829
Marcel Holtmann76bca882009-11-18 00:40:39 +01001830 return 0;
1831}
1832EXPORT_SYMBOL(hci_recv_frame);
1833
/* Reassemble one HCI packet from driver-provided fragments.
 *
 * @hdev:  device the data arrived on
 * @type:  HCI packet type (HCI_ACLDATA_PKT..HCI_EVENT_PKT)
 * @data:  fragment payload
 * @count: number of bytes available in @data
 * @index: slot in hdev->reassembly[] used to keep partial state
 *
 * Accumulates bytes into a per-slot skb. Once the packet header is
 * complete, the expected payload length is read from it; when the whole
 * packet is complete it is delivered via hci_recv_frame(). Returns the
 * number of unconsumed bytes left in @data, or a negative errno.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate a buffer sized for the
		 * largest packet of this type and expect its header first.
		 */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes complete the current
		 * header or payload. */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* When exactly the header has arrived, read the payload
		 * length from it and sanity-check against the tailroom.
		 */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1942
Marcel Holtmannef222012007-07-11 06:42:04 +02001943int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1944{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301945 int rem = 0;
1946
Marcel Holtmannef222012007-07-11 06:42:04 +02001947 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1948 return -EILSEQ;
1949
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001950 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001951 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301952 if (rem < 0)
1953 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001954
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301955 data += (count - rem);
1956 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001957 }
Marcel Holtmannef222012007-07-11 06:42:04 +02001958
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05301959 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02001960}
1961EXPORT_SYMBOL(hci_recv_fragment);
1962
Suraj Sumangala99811512010-07-14 13:02:19 +05301963#define STREAM_REASSEMBLY 0
1964
1965int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1966{
1967 int type;
1968 int rem = 0;
1969
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03001970 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05301971 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1972
1973 if (!skb) {
1974 struct { char type; } *pkt;
1975
1976 /* Start of the frame */
1977 pkt = data;
1978 type = pkt->type;
1979
1980 data++;
1981 count--;
1982 } else
1983 type = bt_cb(skb)->pkt_type;
1984
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001985 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001986 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05301987 if (rem < 0)
1988 return rem;
1989
1990 data += (count - rem);
1991 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00001992 }
Suraj Sumangala99811512010-07-14 13:02:19 +05301993
1994 return rem;
1995}
1996EXPORT_SYMBOL(hci_recv_stream_fragment);
1997
Linus Torvalds1da177e2005-04-16 15:20:36 -07001998/* ---- Interface to upper protocols ---- */
1999
/* Register an upper-protocol callback structure; always succeeds. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2011
/* Remove a previously registered upper-protocol callback structure. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2023
2024static int hci_send_frame(struct sk_buff *skb)
2025{
2026 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2027
2028 if (!hdev) {
2029 kfree_skb(skb);
2030 return -ENODEV;
2031 }
2032
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002033 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002035 /* Time stamp */
2036 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002038 /* Send copy to monitor */
2039 hci_send_to_monitor(hdev, skb);
2040
2041 if (atomic_read(&hdev->promisc)) {
2042 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002043 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002044 }
2045
2046 /* Get rid of skb owner, prior to sending to the driver. */
2047 skb_orphan(skb);
2048
2049 return hdev->send(skb);
2050}
2051
/* Send HCI command.
 *
 * Builds an HCI command packet (header + optional parameters), queues it
 * on the command queue and kicks the command work. During HCI_INIT the
 * opcode is also recorded in init_last_cmd. Returns 0 or -ENOMEM.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
2088/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002089void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090{
2091 struct hci_command_hdr *hdr;
2092
2093 if (!hdev->sent_cmd)
2094 return NULL;
2095
2096 hdr = (void *) hdev->sent_cmd->data;
2097
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002098 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002099 return NULL;
2100
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002101 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
2103 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2104}
2105
2106/* Send ACL data */
2107static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2108{
2109 struct hci_acl_hdr *hdr;
2110 int len = skb->len;
2111
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002112 skb_push(skb, HCI_ACL_HDR_SIZE);
2113 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002114 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002115 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2116 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002117}
2118
/* Add ACL headers and queue an (optionally fragmented) ACL packet.
 *
 * BR/EDR controllers address by connection handle, AMP controllers by
 * logical channel handle. A frag_list skb is flattened: each fragment
 * gets its own header, the first keeping @flags and the rest marked
 * ACL_CONT, and all are queued atomically under the queue lock.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Restrict len to the head fragment; the rest follows in frag_list */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2177
/* Queue ACL data on a channel and kick the TX work to transmit it. */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002190
/* Send SCO data.
 *
 * Prepends a SCO header (connection handle + payload length), queues the
 * skb on the connection's data queue and kicks the TX work.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the header before pushing room for it onto the skb */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002212
2213/* ---- HCI TX task (outgoing data) ---- */
2214
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest packets
 * in flight (c->sent), and compute its fair share of the controller's
 * buffer quota in *quote (at least 1 when a connection was found, 0
 * otherwise). Traverses the connection list under RCU.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Available controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Fall back to ACL buffers when LE has no own pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2275
/* Handle a link TX timeout: disconnect every connection of @type that
 * still has unacknowledged packets outstanding. Runs under RCU.
 */
static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %pMR",
			       hdev->name, &c->dst);
			hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
		}
	}

	rcu_read_unlock();
}
2296
/* Channel-level scheduler: among all channels of connections of @type
 * with data queued, pick one whose head skb has the highest priority,
 * breaking ties by the fewest packets in flight on its connection.
 * Computes the channel's buffer quota in *quote (>= 1). Returns NULL
 * when nothing is ready. Traverses connections and channels under RCU.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level.
			 */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Available controller buffers for the chosen link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2378
/* Anti-starvation pass: for every connection of @type, promote the head
 * skb of channels that sent nothing in the last round to HCI_PRIO_MAX-1,
 * and reset the sent counter of channels that did transmit. Runs under
 * RCU.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got service: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			/* Starved channel: bump its head skb's priority */
			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2428
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002429static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2430{
2431 /* Calculate count of blocks used by this packet */
2432 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2433}
2434
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002435static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002436{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002437 if (!test_bit(HCI_RAW, &hdev->flags)) {
2438 /* ACL tx timeout must be longer than maximum
2439 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002440 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002441 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002442 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002443 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002444}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002445
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002446static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002447{
2448 unsigned int cnt = hdev->acl_cnt;
2449 struct hci_chan *chan;
2450 struct sk_buff *skb;
2451 int quote;
2452
2453 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002454
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002455 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002456 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002457 u32 priority = (skb_peek(&chan->data_q))->priority;
2458 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002459 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002460 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002461
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002462 /* Stop if priority has changed */
2463 if (skb->priority < priority)
2464 break;
2465
2466 skb = skb_dequeue(&chan->data_q);
2467
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002468 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002469 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002470
Linus Torvalds1da177e2005-04-16 15:20:36 -07002471 hci_send_frame(skb);
2472 hdev->acl_last_tx = jiffies;
2473
2474 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002475 chan->sent++;
2476 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002477 }
2478 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002479
2480 if (cnt != hdev->acl_cnt)
2481 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002482}
2483
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002484static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002485{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002486 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002487 struct hci_chan *chan;
2488 struct sk_buff *skb;
2489 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002490 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002491
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002492 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002493
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002494 BT_DBG("%s", hdev->name);
2495
2496 if (hdev->dev_type == HCI_AMP)
2497 type = AMP_LINK;
2498 else
2499 type = ACL_LINK;
2500
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002501 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002502 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002503 u32 priority = (skb_peek(&chan->data_q))->priority;
2504 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2505 int blocks;
2506
2507 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002508 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002509
2510 /* Stop if priority has changed */
2511 if (skb->priority < priority)
2512 break;
2513
2514 skb = skb_dequeue(&chan->data_q);
2515
2516 blocks = __get_blocks(hdev, skb);
2517 if (blocks > hdev->block_cnt)
2518 return;
2519
2520 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002521 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002522
2523 hci_send_frame(skb);
2524 hdev->acl_last_tx = jiffies;
2525
2526 hdev->block_cnt -= blocks;
2527 quote -= blocks;
2528
2529 chan->sent += blocks;
2530 chan->conn->sent += blocks;
2531 }
2532 }
2533
2534 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002535 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002536}
2537
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002538static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002539{
2540 BT_DBG("%s", hdev->name);
2541
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002542 /* No ACL link over BR/EDR controller */
2543 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2544 return;
2545
2546 /* No AMP link over AMP controller */
2547 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002548 return;
2549
2550 switch (hdev->flow_ctl_mode) {
2551 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2552 hci_sched_acl_pkt(hdev);
2553 break;
2554
2555 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2556 hci_sched_acl_blk(hdev);
2557 break;
2558 }
2559}
2560
Linus Torvalds1da177e2005-04-16 15:20:36 -07002561/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002562static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002563{
2564 struct hci_conn *conn;
2565 struct sk_buff *skb;
2566 int quote;
2567
2568 BT_DBG("%s", hdev->name);
2569
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002570 if (!hci_conn_num(hdev, SCO_LINK))
2571 return;
2572
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2574 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2575 BT_DBG("skb %p len %d", skb, skb->len);
2576 hci_send_frame(skb);
2577
2578 conn->sent++;
2579 if (conn->sent == ~0)
2580 conn->sent = 0;
2581 }
2582 }
2583}
2584
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002585static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002586{
2587 struct hci_conn *conn;
2588 struct sk_buff *skb;
2589 int quote;
2590
2591 BT_DBG("%s", hdev->name);
2592
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002593 if (!hci_conn_num(hdev, ESCO_LINK))
2594 return;
2595
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002596 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2597 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002598 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2599 BT_DBG("skb %p len %d", skb, skb->len);
2600 hci_send_frame(skb);
2601
2602 conn->sent++;
2603 if (conn->sent == ~0)
2604 conn->sent = 0;
2605 }
2606 }
2607}
2608
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002609static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002610{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002611 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002612 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002613 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002614
2615 BT_DBG("%s", hdev->name);
2616
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002617 if (!hci_conn_num(hdev, LE_LINK))
2618 return;
2619
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002620 if (!test_bit(HCI_RAW, &hdev->flags)) {
2621 /* LE tx timeout must be longer than maximum
2622 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03002623 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002624 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002625 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002626 }
2627
2628 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002629 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002630 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002631 u32 priority = (skb_peek(&chan->data_q))->priority;
2632 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002633 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002634 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002635
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002636 /* Stop if priority has changed */
2637 if (skb->priority < priority)
2638 break;
2639
2640 skb = skb_dequeue(&chan->data_q);
2641
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002642 hci_send_frame(skb);
2643 hdev->le_last_tx = jiffies;
2644
2645 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002646 chan->sent++;
2647 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002648 }
2649 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002650
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002651 if (hdev->le_pkts)
2652 hdev->le_cnt = cnt;
2653 else
2654 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002655
2656 if (cnt != tmp)
2657 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002658}
2659
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002660static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002661{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002662 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002663 struct sk_buff *skb;
2664
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002665 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002666 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667
2668 /* Schedule queues and send stuff to HCI driver */
2669
2670 hci_sched_acl(hdev);
2671
2672 hci_sched_sco(hdev);
2673
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002674 hci_sched_esco(hdev);
2675
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002676 hci_sched_le(hdev);
2677
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678 /* Send next queued raw (unknown type) packet */
2679 while ((skb = skb_dequeue(&hdev->raw_q)))
2680 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002681}
2682
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002683/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684
/* ACL data packet RX path: strip the ACL header, resolve the
 * connection handle and hand the payload to L2CAP.  On success the skb
 * is consumed by l2cap_recv_acldata(); otherwise it is freed here. */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit handle field carries both handle and PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* First data on this link: tell mgmt exactly once
		 * (HCI_CONN_MGMT_CONNECTED guards against repeats) */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol; skb ownership passes to L2CAP */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	/* Unknown handle: drop the packet */
	kfree_skb(skb);
}
2728
2729/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002730static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731{
2732 struct hci_sco_hdr *hdr = (void *) skb->data;
2733 struct hci_conn *conn;
2734 __u16 handle;
2735
2736 skb_pull(skb, HCI_SCO_HDR_SIZE);
2737
2738 handle = __le16_to_cpu(hdr->handle);
2739
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002740 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002741
2742 hdev->stat.sco_rx++;
2743
2744 hci_dev_lock(hdev);
2745 conn = hci_conn_hash_lookup_handle(hdev, handle);
2746 hci_dev_unlock(hdev);
2747
2748 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002750 sco_recv_scodata(conn, skb);
2751 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002753 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002754 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002755 }
2756
2757 kfree_skb(skb);
2758}
2759
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002760static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002761{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002762 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763 struct sk_buff *skb;
2764
2765 BT_DBG("%s", hdev->name);
2766
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002768 /* Send copy to monitor */
2769 hci_send_to_monitor(hdev, skb);
2770
Linus Torvalds1da177e2005-04-16 15:20:36 -07002771 if (atomic_read(&hdev->promisc)) {
2772 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002773 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 }
2775
2776 if (test_bit(HCI_RAW, &hdev->flags)) {
2777 kfree_skb(skb);
2778 continue;
2779 }
2780
2781 if (test_bit(HCI_INIT, &hdev->flags)) {
2782 /* Don't process data packets in this states. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002783 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002784 case HCI_ACLDATA_PKT:
2785 case HCI_SCODATA_PKT:
2786 kfree_skb(skb);
2787 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07002788 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002789 }
2790
2791 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002792 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002794 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002795 hci_event_packet(hdev, skb);
2796 break;
2797
2798 case HCI_ACLDATA_PKT:
2799 BT_DBG("%s ACL data packet", hdev->name);
2800 hci_acldata_packet(hdev, skb);
2801 break;
2802
2803 case HCI_SCODATA_PKT:
2804 BT_DBG("%s SCO data packet", hdev->name);
2805 hci_scodata_packet(hdev, skb);
2806 break;
2807
2808 default:
2809 kfree_skb(skb);
2810 break;
2811 }
2812 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813}
2814
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002815static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002816{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002817 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002818 struct sk_buff *skb;
2819
Andrei Emeltchenko21047862012-07-10 15:27:47 +03002820 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
2821 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02002824 if (atomic_read(&hdev->cmd_cnt)) {
2825 skb = skb_dequeue(&hdev->cmd_q);
2826 if (!skb)
2827 return;
2828
Wei Yongjun7585b972009-02-25 18:29:52 +08002829 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002831 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2832 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 atomic_dec(&hdev->cmd_cnt);
2834 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02002835 if (test_bit(HCI_RESET, &hdev->flags))
2836 del_timer(&hdev->cmd_timer);
2837 else
2838 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002839 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 } else {
2841 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002842 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843 }
2844 }
2845}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002846
2847int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2848{
2849 /* General inquiry access code (GIAC) */
2850 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2851 struct hci_cp_inquiry cp;
2852
2853 BT_DBG("%s", hdev->name);
2854
2855 if (test_bit(HCI_INQUIRY, &hdev->flags))
2856 return -EINPROGRESS;
2857
Johan Hedberg46632622012-01-02 16:06:08 +02002858 inquiry_cache_flush(hdev);
2859
Andre Guedes2519a1f2011-11-07 11:45:24 -03002860 memset(&cp, 0, sizeof(cp));
2861 memcpy(&cp.lap, lap, sizeof(cp.lap));
2862 cp.length = length;
2863
2864 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2865}
Andre Guedes023d50492011-11-04 14:16:52 -03002866
2867int hci_cancel_inquiry(struct hci_dev *hdev)
2868{
2869 BT_DBG("%s", hdev->name);
2870
2871 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002872 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002873
2874 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2875}
Andre Guedes31f79562012-04-24 21:02:53 -03002876
2877u8 bdaddr_to_le(u8 bdaddr_type)
2878{
2879 switch (bdaddr_type) {
2880 case BDADDR_LE_PUBLIC:
2881 return ADDR_LE_DEV_PUBLIC;
2882
2883 default:
2884 /* Fallback to LE Random address type */
2885 return ADDR_LE_DEV_RANDOM;
2886 }
2887}