blob: e3a49db9cfcb4a17cb68287838a0de531abb7508 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
Marcel Holtmannb78752c2010-08-08 23:06:53 -040036static void hci_rx_work(struct work_struct *work);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -020037static void hci_cmd_work(struct work_struct *work);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -020038static void hci_tx_work(struct work_struct *work);
Linus Torvalds1da177e2005-04-16 15:20:36 -070039
Linus Torvalds1da177e2005-04-16 15:20:36 -070040/* HCI device list */
41LIST_HEAD(hci_dev_list);
42DEFINE_RWLOCK(hci_dev_list_lock);
43
44/* HCI callback list */
45LIST_HEAD(hci_cb_list);
46DEFINE_RWLOCK(hci_cb_list_lock);
47
Sasha Levin3df92b32012-05-27 22:36:56 +020048/* HCI ID Numbering */
49static DEFINE_IDA(hci_index_ida);
50
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Notify HCI socket listeners (e.g. monitor/raw sockets) of a device
 * event such as HCI_DEV_UP or HCI_DEV_DOWN.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
/* Called from event processing when a command completes.
 *
 * @cmd:    opcode of the command that completed
 * @result: HCI status of the completion
 *
 * During init this filters out completions that do not match the last
 * init command (working around buggy controllers); otherwise it wakes
 * up any synchronous request sleeping in __hci_request().
 */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%4.4x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);
		struct sk_buff *skb;

		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */

		/* Only act on a spurious reset complete while the last
		 * sent command was NOT itself a reset. */
		if (cmd != HCI_OP_RESET || opcode == HCI_OP_RESET)
			return;

		/* Re-queue a clone of the last sent command; atomic
		 * context, so allocation failure is silently tolerated. */
		skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
		if (skb) {
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}

		return;
	}

	/* Wake the waiter in __hci_request(), if any. */
	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
97
/* Abort a pending synchronous request with the given (positive) errno;
 * the sleeper in __hci_request() returns -err.
 */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
108
109/* Execute request and wait for completion. */
/* Execute request and wait for completion.
 *
 * @req:     callback that issues the HCI command(s)
 * @opt:     opaque argument passed to @req
 * @timeout: sleep limit in jiffies
 *
 * Returns 0 on success, -EINTR if interrupted by a signal, -ETIMEDOUT
 * on timeout, or a negative errno mapped from the HCI status.
 * Caller must hold the request lock (see hci_request()).
 */
static int __hci_request(struct hci_dev *hdev,
			 void (*req)(struct hci_dev *hdev, unsigned long opt),
			 unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Register on the wait queue BEFORE issuing the request so the
	 * completion wake-up cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Map the controller's HCI status to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds a positive errno from hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
152
/* Serialized wrapper around __hci_request(): takes the per-device
 * request lock and refuses to run when the device is not up.
 * Returns -ENETDOWN if the device is down, otherwise the result of
 * __hci_request().
 */
static int hci_request(struct hci_dev *hdev,
		       void (*req)(struct hci_dev *hdev, unsigned long opt),
		       unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
169
/* Request callback: issue HCI_OP_RESET.  HCI_RESET is set so event
 * handling knows a reset is in flight; @opt is unused.
 */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
178
/* Init sequence for BR/EDR controllers: select packet-based flow
 * control and query local features/version.
 */
static void bredr_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
189
/* Init sequence for AMP controllers: select block-based flow control
 * and query version, AMP info and data block size.
 */
static void amp_init(struct hci_dev *hdev)
{
	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_send_cmd(hdev, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}
203
/* Request callback run during device bring-up: flush any driver-queued
 * init commands, optionally reset the controller, then run the
 * type-specific (BR/EDR vs AMP) init sequence.  @opt is unused.
 */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands: move driver-provided init packets onto the
	 * command queue so they are sent first. */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Reset — skipped for controllers that reset on close. */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(hdev, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(hdev);
		break;

	case HCI_AMP:
		amp_init(hdev);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
240
/* Request callback: set inquiry/page scan enable; @opt carries the
 * scan-enable bitmask.
 */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
250
/* Request callback: write authentication enable; @opt carries the
 * enable value.
 */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
260
/* Request callback: write encryption mode; @opt carries the mode. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
270
/* Request callback: write the default link policy; @opt carries the
 * policy in host order and is converted to little endian on the wire.
 */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
280
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900281/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700282 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	/* Walk the global device list under the read lock; take a
	 * reference before returning so the device cannot vanish. */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700302
303/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200304
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200305bool hci_discovery_active(struct hci_dev *hdev)
306{
307 struct discovery_state *discov = &hdev->discovery;
308
Andre Guedes6fbe1952012-02-03 17:47:58 -0300309 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300310 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300311 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200312 return true;
313
Andre Guedes6fbe1952012-02-03 17:47:58 -0300314 default:
315 return false;
316 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200317}
318
/* Transition the discovery state machine and emit mgmt "discovering"
 * events on the edges that user space cares about.  No-op if the state
 * is unchanged.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so no "stopped discovering" event is sent. */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
344
/* Free every inquiry cache entry and reset the unknown/resolve lists.
 * Caller is expected to hold the device lock — TODO confirm against
 * callers (hci_inquiry, hci_dev_do_close take hci_dev_lock first).
 */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	/* Entries live on ->all; the other lists only alias them, so
	 * freeing via ->all releases everything. */
	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}
358
/* Look up an inquiry cache entry by Bluetooth address across ALL
 * cached entries.  Returns the entry or NULL if not cached.
 */
struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
374
/* Look up an inquiry cache entry by address, restricted to entries
 * whose remote name is not yet known.  Returns the entry or NULL.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
390
/* Look up an entry on the name-resolve list.  With BDADDR_ANY the
 * first entry whose name_state equals @state is returned; otherwise
 * the entry matching @bdaddr.  Returns NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
409
/* Re-insert @ie into the resolve list keeping it sorted: entries whose
 * name resolution is pending come first, then by descending RSSI
 * strength (smaller abs(rssi) = stronger signal sorts earlier).
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	/* Find the last position that still outranks @ie. */
	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
428
/* Insert or refresh an inquiry result in the cache.
 *
 * @data:       inquiry result to store (copied into the entry)
 * @name_known: caller already knows the remote name
 * @ssp:        out-param, set true if the device supports SSP
 *
 * Returns true if the remote name is (now) known for this entry,
 * false if it is still unknown or the entry could not be allocated.
 */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		/* SSP support remembered from an earlier result still
		 * counts even if this result does not advertise it. */
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		/* An RSSI change while a name request is outstanding
		 * re-sorts the entry in the resolve list. */
		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	/* Name just became known: take the entry off whichever
	 * secondary list (unknown/resolve) it was parked on. */
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}
484
/* Copy up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info.  Returns the number of entries copied.
 * Must not sleep — used under the device lock from hci_inquiry().
 */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}
512
/* Request callback: start an inquiry.  @opt points to the user's
 * hci_inquiry_req (LAP, length, max responses).  Skipped if an
 * inquiry is already running.
 */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
529
/* ioctl handler for HCIINQUIRY: run (or reuse a cached) inquiry and
 * copy the results back to user space.
 *
 * @arg: user pointer to a struct hci_inquiry_req, followed on return
 *       by ir.num_rsp inquiry_info records.
 *
 * Returns 0 on success or a negative errno.
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* A fresh inquiry is only issued when the cache is stale,
	 * empty, or the caller explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* ir.length is in 1.28 s units per the HCI spec; 2000 ms here
	 * gives the request some slack beyond the inquiry itself. */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
596
/* Build LE advertising data into @ptr: an EIR flags field, the TX
 * power (when valid), and the local device name (shortened when it
 * does not fit in HCI_MAX_AD_LENGTH).  Returns the number of bytes
 * written.  @ptr must have room for HCI_MAX_AD_LENGTH bytes —
 * presumably cp.data from hci_update_ad(); verify against callers.
 */
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	/* Each AD structure is: length byte, type byte, payload. */
	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		/* -2 reserves the length and type bytes of this field. */
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}
654
/* Rebuild the LE advertising data and push it to the controller with
 * HCI_OP_LE_SET_ADV_DATA, but only if it changed since last time.
 * Returns 0 on success (including no-change), -EINVAL if the device
 * is not LE capable, or the hci_send_cmd() error.
 */
int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	/* Avoid a redundant HCI command if the data is unchanged. */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}
689
Linus Torvalds1da177e2005-04-16 15:20:36 -0700690/* ---- HCI ioctl helpers ---- */
691
/* ioctl helper: bring up HCI device @dev.
 *
 * Opens the transport, runs the HCI init sequence (unless the device
 * is raw), marks the device up and notifies listeners/mgmt.  On init
 * failure the transport is closed again and all queues are purged.
 *
 * Returns 0 on success; -ENODEV, -ERFKILL, -EALREADY, -EIO or the
 * init-request error on failure.
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Raw devices skip the HCI init handshake entirely. */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0, HCI_INIT_TIMEOUT);

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		/* Don't report "powered" to mgmt while still in setup. */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
780
/* Tear down an HCI device: cancel outstanding work, flush queues and
 * the connection/inquiry state, optionally reset the controller, close
 * the transport and notify mgmt.  Returns 0 (also when the device was
 * already down).
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* HCI_AUTO_OFF means mgmt initiated this power-off and will
	 * send its own event; otherwise report powered-off here. */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
870
871int hci_dev_close(__u16 dev)
872{
873 struct hci_dev *hdev;
874 int err;
875
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200876 hdev = hci_dev_get(dev);
877 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700878 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100879
880 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
881 cancel_delayed_work(&hdev->power_off);
882
Linus Torvalds1da177e2005-04-16 15:20:36 -0700883 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +0100884
Linus Torvalds1da177e2005-04-16 15:20:36 -0700885 hci_dev_put(hdev);
886 return err;
887}
888
/* Reset an HCI device (HCIDEVRESET ioctl): drop all queued traffic,
 * flush the inquiry cache and connection hash, and (unless the device
 * is in raw mode) send HCI_Reset to the controller.
 *
 * Runs under the request lock; a no-op if the device is not HCI_UP.
 * Returns 0 on success, -ENODEV for an unknown index, or the
 * __hci_request() result.
 */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	/* Flush discovery and connection state under the device lock */
	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	/* Let the driver flush its own transport queues, if it can */
	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset flow-control accounting: one command credit, no data */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
926
927int hci_dev_reset_stat(__u16 dev)
928{
929 struct hci_dev *hdev;
930 int ret = 0;
931
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200932 hdev = hci_dev_get(dev);
933 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700934 return -ENODEV;
935
936 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
937
938 hci_dev_put(hdev);
939
940 return ret;
941}
942
/* Handle the device-configuration ioctls (HCISET*).
 *
 * @cmd: ioctl number; @arg: userspace pointer to struct hci_dev_req.
 * Returns 0 on success, -EFAULT on bad user memory, -ENODEV for an
 * unknown device, -EOPNOTSUPP when the controller lacks the feature,
 * or -EINVAL for an unrecognized command.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					  HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
				  HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only accept-role and master bits are user settable */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs MTU in the high half, packet count low */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1017
/* HCIGETDEVLIST ioctl: copy a list of (dev_id, flags) pairs for up to
 * dev_num registered devices back to user space.
 *
 * @arg points to a struct hci_dev_list_req whose first __u16 is the
 * caller-supplied capacity. The capacity is capped so the temporary
 * buffer stays within two pages. Returns 0, -EFAULT, -EINVAL or -ENOMEM.
 */
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	/* Bound the allocation: at most two pages worth of entries */
	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		/* Listing a device keeps it alive: cancel auto power-off */
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
			cancel_delayed_work(&hdev->power_off);

		/* Legacy (non-mgmt) users get pairable devices by default */
		if (!test_bit(HCI_MGMT, &hdev->dev_flags))
			set_bit(HCI_PAIRABLE, &hdev->dev_flags);

		(dr + n)->dev_id = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock(&hci_dev_list_lock);

	/* Only copy back the entries actually filled in */
	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
1064
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * device and copy it to user space.
 *
 * For LE-only controllers the ACL fields report the LE buffer settings
 * and the SCO fields are zeroed, so legacy tools still see sane values.
 * Returns 0, -EFAULT or -ENODEV.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device keeps it alive: cancel auto power-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) users get pairable devices by default */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only: expose LE buffers through the ACL fields */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1113
1114/* ---- Interface to HCI drivers ---- */
1115
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001116static int hci_rfkill_set_block(void *data, bool blocked)
1117{
1118 struct hci_dev *hdev = data;
1119
1120 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1121
1122 if (!blocked)
1123 return 0;
1124
1125 hci_dev_do_close(hdev);
1126
1127 return 0;
1128}
1129
/* rfkill operations table: only the block/unblock callback is needed */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1133
/* Deferred power-on work: opens the device and, when it was brought up
 * automatically (HCI_AUTO_OFF), schedules it to be powered back off
 * after HCI_AUTO_OFF_TIMEOUT unless something claims it first. Tells
 * the management interface about the new index once setup completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		schedule_delayed_work(&hdev->power_off, HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1149
/* Deferred power-off work (scheduled from hci_power_on or mgmt):
 * simply closes the device.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1159
/* Delayed work ending a timed discoverable period: drop inquiry scan by
 * writing SCAN_PAGE (page scan only) and clear the stored timeout.
 */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1177
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001178int hci_uuids_clear(struct hci_dev *hdev)
1179{
1180 struct list_head *p, *n;
1181
1182 list_for_each_safe(p, n, &hdev->uuids) {
1183 struct bt_uuid *uuid;
1184
1185 uuid = list_entry(p, struct bt_uuid, list);
1186
1187 list_del(p);
1188 kfree(uuid);
1189 }
1190
1191 return 0;
1192}
1193
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001194int hci_link_keys_clear(struct hci_dev *hdev)
1195{
1196 struct list_head *p, *n;
1197
1198 list_for_each_safe(p, n, &hdev->link_keys) {
1199 struct link_key *key;
1200
1201 key = list_entry(p, struct link_key, list);
1202
1203 list_del(p);
1204 kfree(key);
1205 }
1206
1207 return 0;
1208}
1209
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001210int hci_smp_ltks_clear(struct hci_dev *hdev)
1211{
1212 struct smp_ltk *k, *tmp;
1213
1214 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1215 list_del(&k->list);
1216 kfree(k);
1217 }
1218
1219 return 0;
1220}
1221
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001222struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1223{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001224 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001225
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001226 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001227 if (bacmp(bdaddr, &k->bdaddr) == 0)
1228 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001229
1230 return NULL;
1231}
1232
/* Decide whether a new/updated link key should be stored persistently
 * (survive reboots and be flagged as storable to user space) based on
 * its type and the authentication requirements of both sides.
 */
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
			       u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return true;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return false;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return false;

	/* Security mode 3 case */
	if (!conn)
		return true;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return true;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return true;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return true;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return false;
}
1268
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001269struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001270{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001271 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001272
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001273 list_for_each_entry(k, &hdev->long_term_keys, list) {
1274 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001275 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001276 continue;
1277
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001278 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001279 }
1280
1281 return NULL;
1282}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001283
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001284struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001285 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001286{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001287 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001288
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001289 list_for_each_entry(k, &hdev->long_term_keys, list)
1290 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001291 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001292 return k;
1293
1294 return NULL;
1295}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001296
/* Store (or update) a BR/EDR link key for @bdaddr.
 *
 * @new_key: non-zero when the controller just generated this key, in
 * which case user space is notified and the connection is marked so a
 * non-persistent key gets flushed on disconnect.
 * Returns 0 on success or -ENOMEM if a new entry cannot be allocated.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
		     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;
	bool persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" for the logic below */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
	    (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, HCI_LINK_KEY_SIZE);
	key->pin_len = pin_len;

	/* A "changed combination" event keeps the original key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (conn)
		conn->flush_key = !persistent;

	return 0;
}
1349
/* Store (or update) an SMP key for @bdaddr/@addr_type.
 *
 * Only STK and LTK types are accepted; anything else is silently
 * ignored (returns 0). When @new_key is set and the key is a proper
 * LTK, user space is notified via mgmt. Returns -ENOMEM if a new
 * entry cannot be allocated.
 */
int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
		int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
		ediv, u8 rand[8])
{
	struct smp_ltk *key, *old_key;

	if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
		return 0;

	/* Reuse an existing entry for this peer when there is one */
	old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
	if (old_key)
		key = old_key;
	else {
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->long_term_keys);
	}

	bacpy(&key->bdaddr, bdaddr);
	key->bdaddr_type = addr_type;
	memcpy(key->val, tk, sizeof(key->val));
	key->authenticated = authenticated;
	key->ediv = ediv;
	key->enc_size = enc_size;
	key->type = type;
	memcpy(key->rand, rand, sizeof(key->rand));

	if (!new_key)
		return 0;

	/* STKs are short-lived and not reported to user space */
	if (type & HCI_SMP_LTK)
		mgmt_new_ltk(hdev, key, 1);

	return 0;
}
1386
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001387int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1388{
1389 struct link_key *key;
1390
1391 key = hci_find_link_key(hdev, bdaddr);
1392 if (!key)
1393 return -ENOENT;
1394
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001395 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001396
1397 list_del(&key->list);
1398 kfree(key);
1399
1400 return 0;
1401}
1402
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001403int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1404{
1405 struct smp_ltk *k, *tmp;
1406
1407 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1408 if (bacmp(bdaddr, &k->bdaddr))
1409 continue;
1410
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001411 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001412
1413 list_del(&k->list);
1414 kfree(k);
1415 }
1416
1417 return 0;
1418}
1419
/* HCI command timer function: fires when the controller failed to
 * answer a command in time. Logs the stuck opcode (when known),
 * restores the single command credit and kicks the command work so
 * queued commands are not starved by the lost completion.
 * Runs in timer (softirq) context.
 */
static void hci_cmd_timeout(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	if (hdev->sent_cmd) {
		struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
		u16 opcode = __le16_to_cpu(sent->opcode);

		BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
	} else {
		BT_ERR("%s command tx timeout", hdev->name);
	}

	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1437
Szymon Janc2763eda2011-03-22 13:12:22 +01001438struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001439 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001440{
1441 struct oob_data *data;
1442
1443 list_for_each_entry(data, &hdev->remote_oob_data, list)
1444 if (bacmp(bdaddr, &data->bdaddr) == 0)
1445 return data;
1446
1447 return NULL;
1448}
1449
1450int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1451{
1452 struct oob_data *data;
1453
1454 data = hci_find_remote_oob_data(hdev, bdaddr);
1455 if (!data)
1456 return -ENOENT;
1457
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001458 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001459
1460 list_del(&data->list);
1461 kfree(data);
1462
1463 return 0;
1464}
1465
1466int hci_remote_oob_data_clear(struct hci_dev *hdev)
1467{
1468 struct oob_data *data, *n;
1469
1470 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1471 list_del(&data->list);
1472 kfree(data);
1473 }
1474
1475 return 0;
1476}
1477
/* Store (or refresh) remote OOB pairing data (hash C / randomizer R)
 * for @bdaddr. A new entry is allocated when the peer has none yet.
 * Returns 0 on success or -ENOMEM.
 */
int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
			    u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %pMR", hdev->name, bdaddr);

	return 0;
}
1501
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001502struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001503{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001504 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001505
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001506 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001507 if (bacmp(bdaddr, &b->bdaddr) == 0)
1508 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001509
1510 return NULL;
1511}
1512
1513int hci_blacklist_clear(struct hci_dev *hdev)
1514{
1515 struct list_head *p, *n;
1516
1517 list_for_each_safe(p, n, &hdev->blacklist) {
1518 struct bdaddr_list *b;
1519
1520 b = list_entry(p, struct bdaddr_list, list);
1521
1522 list_del(p);
1523 kfree(b);
1524 }
1525
1526 return 0;
1527}
1528
/* Add @bdaddr to the device blacklist.
 *
 * Returns -EBADF for BDADDR_ANY, -EEXIST if already present, -ENOMEM
 * on allocation failure, otherwise the result of notifying mgmt.
 * NOTE(review): @type is only forwarded to mgmt_device_blocked(), not
 * stored in the entry — lookups here ignore the address type.
 */
int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr, type);
}
1549
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001550int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001551{
1552 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001553
Szymon Janc1ec918c2011-11-16 09:32:21 +01001554 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001555 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001556
1557 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001558 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001559 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001560
1561 list_del(&entry->list);
1562 kfree(entry);
1563
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001564 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001565}
1566
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001567static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1568{
1569 struct le_scan_params *param = (struct le_scan_params *) opt;
1570 struct hci_cp_le_set_scan_param cp;
1571
1572 memset(&cp, 0, sizeof(cp));
1573 cp.type = param->type;
1574 cp.interval = cpu_to_le16(param->interval);
1575 cp.window = cpu_to_le16(param->window);
1576
1577 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1578}
1579
1580static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1581{
1582 struct hci_cp_le_set_scan_enable cp;
1583
1584 memset(&cp, 0, sizeof(cp));
1585 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001586 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001587
1588 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1589}
1590
/* Synchronously start an LE scan: set the scan parameters, enable
 * scanning, then schedule delayed work to stop it after @timeout ms.
 *
 * Both HCI requests run under the request lock with a fixed 3 s
 * per-command timeout. Returns -EINPROGRESS if a scan is already
 * running, a negative request error, or 0 on success.
 * Must be callable from process context (sleeps).
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	/* param lives on the stack; the request completes before return */
	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
			    timeo);
	if (!err)
		err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Arrange for the scan to be switched off after the timeout */
	schedule_delayed_work(&hdev->le_scan_disable,
			      msecs_to_jiffies(timeout));

	return 0;
}
1624
/* Cancel a running LE scan.
 *
 * Returns -EALREADY when no scan is active. If the delayed disable
 * work was still pending it is cancelled and the controller is told to
 * stop scanning now (an all-zero enable command means "disable"); if
 * the work already ran, scanning is being stopped by it anyway.
 */
int hci_cancel_le_scan(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EALREADY;

	if (cancel_delayed_work(&hdev->le_scan_disable)) {
		struct hci_cp_le_set_scan_enable cp;

		/* Send HCI command to disable LE Scan */
		memset(&cp, 0, sizeof(cp));
		hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}
1642
/* Delayed work stopping a timed LE scan: an all-zero LE Set Scan
 * Enable command disables scanning.
 */
static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	struct hci_cp_le_set_scan_enable cp;

	BT_DBG("%s", hdev->name);

	memset(&cp, 0, sizeof(cp));

	hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}
1655
Andre Guedes28b75a82012-02-03 17:48:00 -03001656static void le_scan_work(struct work_struct *work)
1657{
1658 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1659 struct le_scan_params *param = &hdev->le_scan_params;
1660
1661 BT_DBG("%s", hdev->name);
1662
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001663 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1664 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001665}
1666
/* Kick off an asynchronous LE scan.  The parameters are recorded in the
 * device and the actual blocking scan setup runs from system_long_wq.
 *
 * Returns -ENOTSUPP when the controller is in LE peripheral role and
 * -EINPROGRESS if the scan work item is already queued or running.
 */
int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
		int timeout)
{
	struct le_scan_params *param = &hdev->le_scan_params;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		return -ENOTSUPP;

	if (work_busy(&hdev->le_scan))
		return -EINPROGRESS;

	param->type = type;
	param->interval = interval;
	param->window = window;
	param->timeout = timeout;

	/* The setup blocks on HCI requests, hence the long-running
	 * system workqueue. */
	queue_work(system_long_wq, &hdev->le_scan);

	return 0;
}
1689
David Herrmann9be0dab2012-04-22 14:39:57 +02001690/* Alloc HCI device */
1691struct hci_dev *hci_alloc_dev(void)
1692{
1693 struct hci_dev *hdev;
1694
1695 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1696 if (!hdev)
1697 return NULL;
1698
David Herrmannb1b813d2012-04-22 14:39:58 +02001699 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1700 hdev->esco_type = (ESCO_HV1);
1701 hdev->link_mode = (HCI_LM_ACCEPT);
1702 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01001703 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1704 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02001705
David Herrmannb1b813d2012-04-22 14:39:58 +02001706 hdev->sniff_max_interval = 800;
1707 hdev->sniff_min_interval = 80;
1708
1709 mutex_init(&hdev->lock);
1710 mutex_init(&hdev->req_lock);
1711
1712 INIT_LIST_HEAD(&hdev->mgmt_pending);
1713 INIT_LIST_HEAD(&hdev->blacklist);
1714 INIT_LIST_HEAD(&hdev->uuids);
1715 INIT_LIST_HEAD(&hdev->link_keys);
1716 INIT_LIST_HEAD(&hdev->long_term_keys);
1717 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03001718 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02001719
1720 INIT_WORK(&hdev->rx_work, hci_rx_work);
1721 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1722 INIT_WORK(&hdev->tx_work, hci_tx_work);
1723 INIT_WORK(&hdev->power_on, hci_power_on);
1724 INIT_WORK(&hdev->le_scan, le_scan_work);
1725
David Herrmannb1b813d2012-04-22 14:39:58 +02001726 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1727 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1728 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1729
David Herrmann9be0dab2012-04-22 14:39:57 +02001730 skb_queue_head_init(&hdev->driver_init);
David Herrmannb1b813d2012-04-22 14:39:58 +02001731 skb_queue_head_init(&hdev->rx_q);
1732 skb_queue_head_init(&hdev->cmd_q);
1733 skb_queue_head_init(&hdev->raw_q);
1734
1735 init_waitqueue_head(&hdev->req_wait_q);
1736
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001737 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02001738
David Herrmannb1b813d2012-04-22 14:39:58 +02001739 hci_init_sysfs(hdev);
1740 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02001741
1742 return hdev;
1743}
1744EXPORT_SYMBOL(hci_alloc_dev);
1745
/* Free HCI device.
 *
 * Drops the driver's reference; the hci_dev memory itself is released
 * from the device-model release callback once the last reference goes
 * away.
 */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1755
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756/* Register HCI device */
1757int hci_register_dev(struct hci_dev *hdev)
1758{
David Herrmannb1b813d2012-04-22 14:39:58 +02001759 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760
David Herrmann010666a2012-01-07 15:47:07 +01001761 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001762 return -EINVAL;
1763
Mat Martineau08add512011-11-02 16:18:36 -07001764 /* Do not allow HCI_AMP devices to register at index 0,
1765 * so the index can be used as the AMP controller ID.
1766 */
Sasha Levin3df92b32012-05-27 22:36:56 +02001767 switch (hdev->dev_type) {
1768 case HCI_BREDR:
1769 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
1770 break;
1771 case HCI_AMP:
1772 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
1773 break;
1774 default:
1775 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001777
Sasha Levin3df92b32012-05-27 22:36:56 +02001778 if (id < 0)
1779 return id;
1780
Linus Torvalds1da177e2005-04-16 15:20:36 -07001781 sprintf(hdev->name, "hci%d", id);
1782 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03001783
1784 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1785
Sasha Levin3df92b32012-05-27 22:36:56 +02001786 write_lock(&hci_dev_list_lock);
1787 list_add(&hdev->list, &hci_dev_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001788 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001789
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02001790 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001791 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02001792 if (!hdev->workqueue) {
1793 error = -ENOMEM;
1794 goto err;
1795 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001796
David Herrmann33ca9542011-10-08 14:58:49 +02001797 error = hci_add_sysfs(hdev);
1798 if (error < 0)
1799 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001800
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001801 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001802 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
1803 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001804 if (hdev->rfkill) {
1805 if (rfkill_register(hdev->rfkill) < 0) {
1806 rfkill_destroy(hdev->rfkill);
1807 hdev->rfkill = NULL;
1808 }
1809 }
1810
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001811 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03001812
1813 if (hdev->dev_type != HCI_AMP)
1814 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1815
Gustavo F. Padovan7f971042011-12-18 12:40:32 -02001816 schedule_work(&hdev->power_on);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001817
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01001819 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
1821 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001822
David Herrmann33ca9542011-10-08 14:58:49 +02001823err_wqueue:
1824 destroy_workqueue(hdev->workqueue);
1825err:
Sasha Levin3df92b32012-05-27 22:36:56 +02001826 ida_simple_remove(&hci_index_ida, hdev->id);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001827 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001828 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001829 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01001830
David Herrmann33ca9542011-10-08 14:58:49 +02001831 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001832}
1833EXPORT_SYMBOL(hci_register_dev);
1834
/* Unregister HCI device.
 *
 * Tears down a registered controller: removes it from the global list,
 * closes it, notifies mgmt and the HCI notifier chain, and releases
 * sysfs/rfkill/workqueue resources before dropping the registration
 * reference and freeing the index.  The order of these steps matters;
 * do not reorder without care.
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Stops re-arming of timers/work while we tear down */
	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be gone before we free it */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any half-assembled packets from the driver */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);

	/* Flush all persistent per-device state under the dev lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
1890
/* Suspend HCI device.
 *
 * Only notifies the HCI notifier chain; any actual power handling is
 * up to the listeners.  Always returns 0.
 */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1898
/* Resume HCI device.
 *
 * Counterpart of hci_suspend_dev(): only notifies the HCI notifier
 * chain.  Always returns 0.
 */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1906
Marcel Holtmann76bca882009-11-18 00:40:39 +01001907/* Receive frame from HCI drivers */
1908int hci_recv_frame(struct sk_buff *skb)
1909{
1910 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1911 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001912 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01001913 kfree_skb(skb);
1914 return -ENXIO;
1915 }
1916
1917 /* Incomming skb */
1918 bt_cb(skb)->incoming = 1;
1919
1920 /* Time stamp */
1921 __net_timestamp(skb);
1922
Marcel Holtmann76bca882009-11-18 00:40:39 +01001923 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04001924 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01001925
Marcel Holtmann76bca882009-11-18 00:40:39 +01001926 return 0;
1927}
1928EXPORT_SYMBOL(hci_recv_frame);
1929
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301930static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001931 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301932{
1933 int len = 0;
1934 int hlen = 0;
1935 int remain = count;
1936 struct sk_buff *skb;
1937 struct bt_skb_cb *scb;
1938
1939 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001940 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301941 return -EILSEQ;
1942
1943 skb = hdev->reassembly[index];
1944
1945 if (!skb) {
1946 switch (type) {
1947 case HCI_ACLDATA_PKT:
1948 len = HCI_MAX_FRAME_SIZE;
1949 hlen = HCI_ACL_HDR_SIZE;
1950 break;
1951 case HCI_EVENT_PKT:
1952 len = HCI_MAX_EVENT_SIZE;
1953 hlen = HCI_EVENT_HDR_SIZE;
1954 break;
1955 case HCI_SCODATA_PKT:
1956 len = HCI_MAX_SCO_SIZE;
1957 hlen = HCI_SCO_HDR_SIZE;
1958 break;
1959 }
1960
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03001961 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301962 if (!skb)
1963 return -ENOMEM;
1964
1965 scb = (void *) skb->cb;
1966 scb->expect = hlen;
1967 scb->pkt_type = type;
1968
1969 skb->dev = (void *) hdev;
1970 hdev->reassembly[index] = skb;
1971 }
1972
1973 while (count) {
1974 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03001975 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05301976
1977 memcpy(skb_put(skb, len), data, len);
1978
1979 count -= len;
1980 data += len;
1981 scb->expect -= len;
1982 remain = count;
1983
1984 switch (type) {
1985 case HCI_EVENT_PKT:
1986 if (skb->len == HCI_EVENT_HDR_SIZE) {
1987 struct hci_event_hdr *h = hci_event_hdr(skb);
1988 scb->expect = h->plen;
1989
1990 if (skb_tailroom(skb) < scb->expect) {
1991 kfree_skb(skb);
1992 hdev->reassembly[index] = NULL;
1993 return -ENOMEM;
1994 }
1995 }
1996 break;
1997
1998 case HCI_ACLDATA_PKT:
1999 if (skb->len == HCI_ACL_HDR_SIZE) {
2000 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2001 scb->expect = __le16_to_cpu(h->dlen);
2002
2003 if (skb_tailroom(skb) < scb->expect) {
2004 kfree_skb(skb);
2005 hdev->reassembly[index] = NULL;
2006 return -ENOMEM;
2007 }
2008 }
2009 break;
2010
2011 case HCI_SCODATA_PKT:
2012 if (skb->len == HCI_SCO_HDR_SIZE) {
2013 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2014 scb->expect = h->dlen;
2015
2016 if (skb_tailroom(skb) < scb->expect) {
2017 kfree_skb(skb);
2018 hdev->reassembly[index] = NULL;
2019 return -ENOMEM;
2020 }
2021 }
2022 break;
2023 }
2024
2025 if (scb->expect == 0) {
2026 /* Complete frame */
2027
2028 bt_cb(skb)->pkt_type = type;
2029 hci_recv_frame(skb);
2030
2031 hdev->reassembly[index] = NULL;
2032 return remain;
2033 }
2034 }
2035
2036 return remain;
2037}
2038
Marcel Holtmannef222012007-07-11 06:42:04 +02002039int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2040{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302041 int rem = 0;
2042
Marcel Holtmannef222012007-07-11 06:42:04 +02002043 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2044 return -EILSEQ;
2045
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002046 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002047 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302048 if (rem < 0)
2049 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002050
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302051 data += (count - rem);
2052 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002053 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002054
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302055 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002056}
2057EXPORT_SYMBOL(hci_recv_fragment);
2058
Suraj Sumangala99811512010-07-14 13:02:19 +05302059#define STREAM_REASSEMBLY 0
2060
2061int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2062{
2063 int type;
2064 int rem = 0;
2065
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002066 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302067 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2068
2069 if (!skb) {
2070 struct { char type; } *pkt;
2071
2072 /* Start of the frame */
2073 pkt = data;
2074 type = pkt->type;
2075
2076 data++;
2077 count--;
2078 } else
2079 type = bt_cb(skb)->pkt_type;
2080
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002081 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002082 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302083 if (rem < 0)
2084 return rem;
2085
2086 data += (count - rem);
2087 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002088 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302089
2090 return rem;
2091}
2092EXPORT_SYMBOL(hci_recv_stream_fragment);
2093
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094/* ---- Interface to upper protocols ---- */
2095
Linus Torvalds1da177e2005-04-16 15:20:36 -07002096int hci_register_cb(struct hci_cb *cb)
2097{
2098 BT_DBG("%p name %s", cb, cb->name);
2099
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002100 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002102 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002103
2104 return 0;
2105}
2106EXPORT_SYMBOL(hci_register_cb);
2107
/* Remove a previously registered upper-protocol callback structure
 * from the global HCI callback list.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2119
/* Hand one outgoing frame to the transport driver.
 *
 * Timestamps the skb, duplicates it to the monitor channel and (in
 * promiscuous mode) to raw HCI sockets, then passes ownership to the
 * driver's send callback.  Returns the driver's result, or -ENODEV if
 * the skb carries no device.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2147
/* Send HCI command.
 *
 * Builds a command packet (header + @plen bytes of @param) and queues
 * it on the command queue for the cmd worker to send.  Returns 0 on
 * success or -ENOMEM if the skb cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during controller init so the
	 * init sequence can match completions. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183
2184/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002185void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002186{
2187 struct hci_command_hdr *hdr;
2188
2189 if (!hdev->sent_cmd)
2190 return NULL;
2191
2192 hdr = (void *) hdev->sent_cmd->data;
2193
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002194 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002195 return NULL;
2196
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002197 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002198
2199 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2200}
2201
/* Send ACL data */
/* Prepend an ACL header (handle + packet-boundary/broadcast flags and
 * data length) to an outgoing skb.
 */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Handle and flags are packed into one little-endian field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen = cpu_to_le16(len);
}
2214
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002215static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002216 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002217{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002218 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002219 struct hci_dev *hdev = conn->hdev;
2220 struct sk_buff *list;
2221
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002222 skb->len = skb_headlen(skb);
2223 skb->data_len = 0;
2224
2225 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002226
2227 switch (hdev->dev_type) {
2228 case HCI_BREDR:
2229 hci_add_acl_hdr(skb, conn->handle, flags);
2230 break;
2231 case HCI_AMP:
2232 hci_add_acl_hdr(skb, chan->handle, flags);
2233 break;
2234 default:
2235 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2236 return;
2237 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002238
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002239 list = skb_shinfo(skb)->frag_list;
2240 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002241 /* Non fragmented */
2242 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2243
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002244 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002245 } else {
2246 /* Fragmented */
2247 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2248
2249 skb_shinfo(skb)->frag_list = NULL;
2250
2251 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002252 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002254 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002255
2256 flags &= ~ACL_START;
2257 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002258 do {
2259 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002260
Linus Torvalds1da177e2005-04-16 15:20:36 -07002261 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002262 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002263 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002264
2265 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2266
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002267 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002268 } while (list);
2269
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002270 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002272}
2273
/* Queue outgoing ACL data on the channel's data queue and kick the TX
 * worker to schedule it out to the controller.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = chan->conn->hdev;

	BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;

	hci_queue_acl(chan, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002286
/* Send SCO data */
/* Prepend a SCO header and queue the packet on the connection's data
 * queue for the TX worker.
 */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	queue_work(hdev->workqueue, &hdev->tx_work);
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002308
2309/* ---- HCI TX task (outgoing data) ---- */
2310
/* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest
 * in-flight packets (fair scheduling), and compute its TX quota from
 * the controller's free buffer count.
 *
 * Returns the chosen connection (or NULL) and writes its quota to
 * *quote (0 when nothing is schedulable).
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Least-sent connection wins */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type is seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free controller buffers for this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE buffer pool
			 * share the ACL pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Split the buffers evenly; always allow at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2371
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002372static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002373{
2374 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002375 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002376
Ville Tervobae1f5d92011-02-10 22:38:53 -03002377 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002378
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002379 rcu_read_lock();
2380
Linus Torvalds1da177e2005-04-16 15:20:36 -07002381 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002382 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002383 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002384 BT_ERR("%s killing stalled connection %pMR",
2385 hdev->name, &c->dst);
Andrei Emeltchenko7490c6c2012-06-01 16:18:25 +03002386 hci_acl_disconn(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002387 }
2388 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002389
2390 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002391}
2392
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002393static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2394 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002395{
2396 struct hci_conn_hash *h = &hdev->conn_hash;
2397 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02002398 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002399 struct hci_conn *conn;
2400 int cnt, q, conn_num = 0;
2401
2402 BT_DBG("%s", hdev->name);
2403
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002404 rcu_read_lock();
2405
2406 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002407 struct hci_chan *tmp;
2408
2409 if (conn->type != type)
2410 continue;
2411
2412 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2413 continue;
2414
2415 conn_num++;
2416
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002417 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002418 struct sk_buff *skb;
2419
2420 if (skb_queue_empty(&tmp->data_q))
2421 continue;
2422
2423 skb = skb_peek(&tmp->data_q);
2424 if (skb->priority < cur_prio)
2425 continue;
2426
2427 if (skb->priority > cur_prio) {
2428 num = 0;
2429 min = ~0;
2430 cur_prio = skb->priority;
2431 }
2432
2433 num++;
2434
2435 if (conn->sent < min) {
2436 min = conn->sent;
2437 chan = tmp;
2438 }
2439 }
2440
2441 if (hci_conn_num(hdev, type) == conn_num)
2442 break;
2443 }
2444
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002445 rcu_read_unlock();
2446
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002447 if (!chan)
2448 return NULL;
2449
2450 switch (chan->conn->type) {
2451 case ACL_LINK:
2452 cnt = hdev->acl_cnt;
2453 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002454 case AMP_LINK:
2455 cnt = hdev->block_cnt;
2456 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002457 case SCO_LINK:
2458 case ESCO_LINK:
2459 cnt = hdev->sco_cnt;
2460 break;
2461 case LE_LINK:
2462 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2463 break;
2464 default:
2465 cnt = 0;
2466 BT_ERR("Unknown link type");
2467 }
2468
2469 q = cnt / num;
2470 *quote = q ? q : 1;
2471 BT_DBG("chan %p quote %d", chan, *quote);
2472 return chan;
2473}
2474
/* Anti-starvation pass run after a transmit round: for every channel
 * of @type that sent nothing (chan->sent == 0) but still has data
 * queued, raise its head skb's priority to HCI_PRIO_MAX - 1 so it can
 * win the next hci_chan_sent() contest.  Channels that did send only
 * get their per-round counter reset.  Runs under rcu_read_lock().
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset its
			 * counter, no promotion needed */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Every connection of this type visited; stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2524
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002525static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2526{
2527 /* Calculate count of blocks used by this packet */
2528 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2529}
2530
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002531static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002532{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002533 if (!test_bit(HCI_RAW, &hdev->flags)) {
2534 /* ACL tx timeout must be longer than maximum
2535 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002536 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002537 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002538 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002540}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541
/* Packet-based ACL scheduler: while the controller has free ACL
 * packet buffers, repeatedly pick the most deserving channel via
 * hci_chan_sent() and transmit up to its quote of frames, stopping
 * early if a lower-priority skb reaches the head of the queue.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	/* Detect a stalled link before scheduling more data */
	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: promote any starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2579
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002580static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002581{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002582 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002583 struct hci_chan *chan;
2584 struct sk_buff *skb;
2585 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002586 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002587
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002588 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002589
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002590 BT_DBG("%s", hdev->name);
2591
2592 if (hdev->dev_type == HCI_AMP)
2593 type = AMP_LINK;
2594 else
2595 type = ACL_LINK;
2596
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002597 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002598 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002599 u32 priority = (skb_peek(&chan->data_q))->priority;
2600 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2601 int blocks;
2602
2603 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002604 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002605
2606 /* Stop if priority has changed */
2607 if (skb->priority < priority)
2608 break;
2609
2610 skb = skb_dequeue(&chan->data_q);
2611
2612 blocks = __get_blocks(hdev, skb);
2613 if (blocks > hdev->block_cnt)
2614 return;
2615
2616 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002617 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002618
2619 hci_send_frame(skb);
2620 hdev->acl_last_tx = jiffies;
2621
2622 hdev->block_cnt -= blocks;
2623 quote -= blocks;
2624
2625 chan->sent += blocks;
2626 chan->conn->sent += blocks;
2627 }
2628 }
2629
2630 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002631 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002632}
2633
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002634static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002635{
2636 BT_DBG("%s", hdev->name);
2637
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002638 /* No ACL link over BR/EDR controller */
2639 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
2640 return;
2641
2642 /* No AMP link over AMP controller */
2643 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002644 return;
2645
2646 switch (hdev->flow_ctl_mode) {
2647 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2648 hci_sched_acl_pkt(hdev);
2649 break;
2650
2651 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2652 hci_sched_acl_blk(hdev);
2653 break;
2654 }
2655}
2656
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002658static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002659{
2660 struct hci_conn *conn;
2661 struct sk_buff *skb;
2662 int quote;
2663
2664 BT_DBG("%s", hdev->name);
2665
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002666 if (!hci_conn_num(hdev, SCO_LINK))
2667 return;
2668
Linus Torvalds1da177e2005-04-16 15:20:36 -07002669 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2670 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2671 BT_DBG("skb %p len %d", skb, skb->len);
2672 hci_send_frame(skb);
2673
2674 conn->sent++;
2675 if (conn->sent == ~0)
2676 conn->sent = 0;
2677 }
2678 }
2679}
2680
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002681static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002682{
2683 struct hci_conn *conn;
2684 struct sk_buff *skb;
2685 int quote;
2686
2687 BT_DBG("%s", hdev->name);
2688
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002689 if (!hci_conn_num(hdev, ESCO_LINK))
2690 return;
2691
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03002692 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
2693 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002694 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2695 BT_DBG("skb %p len %d", skb, skb->len);
2696 hci_send_frame(skb);
2697
2698 conn->sent++;
2699 if (conn->sent == ~0)
2700 conn->sent = 0;
2701 }
2702 }
2703}
2704
/* LE scheduler: transmits queued LE frames using the controller's
 * dedicated LE buffer pool when it has one (le_pkts != 0), otherwise
 * borrowing from the shared ACL pool.  Mirrors hci_sched_acl_pkt()
 * including the stalled-link detection and starvation pass.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Dedicated LE buffers if present, otherwise share ACL's */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credit back to whichever pool we used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: promote any starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2755
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002756static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002757{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002758 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002759 struct sk_buff *skb;
2760
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002761 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002762 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002763
2764 /* Schedule queues and send stuff to HCI driver */
2765
2766 hci_sched_acl(hdev);
2767
2768 hci_sched_sco(hdev);
2769
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02002770 hci_sched_esco(hdev);
2771
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002772 hci_sched_le(hdev);
2773
Linus Torvalds1da177e2005-04-16 15:20:36 -07002774 /* Send next queued raw (unknown type) packet */
2775 while ((skb = skb_dequeue(&hdev->raw_q)))
2776 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002777}
2778
Lucas De Marchi25985ed2011-03-30 22:57:33 -03002779/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780
/* ACL data packet: strip the ACL header, decode handle and packet
 * boundary flags, look up the owning connection and hand the payload
 * to L2CAP.  Consumes @skb (freed here if the handle is unknown).
 */
static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	/* The 16-bit field packs both the handle and the PB/BC flags */
	handle = __le16_to_cpu(hdr->handle);
	flags = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
	       handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);

		/* First data on a connection mgmt has not been told
		 * about yet: emit Device Connected exactly once */
		hci_dev_lock(hdev);
		if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
		    !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
			mgmt_device_connected(hdev, &conn->dst, conn->type,
					      conn->dst_type, 0, NULL, 0,
					      conn->dev_class);
		hci_dev_unlock(hdev);

		/* Send to upper protocol */
		l2cap_recv_acldata(conn, skb, flags);
		return;
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
		       hdev->name, handle);
	}

	kfree_skb(skb);
}
2824
2825/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002826static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002827{
2828 struct hci_sco_hdr *hdr = (void *) skb->data;
2829 struct hci_conn *conn;
2830 __u16 handle;
2831
2832 skb_pull(skb, HCI_SCO_HDR_SIZE);
2833
2834 handle = __le16_to_cpu(hdr->handle);
2835
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002836 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837
2838 hdev->stat.sco_rx++;
2839
2840 hci_dev_lock(hdev);
2841 conn = hci_conn_hash_lookup_handle(hdev, handle);
2842 hci_dev_unlock(hdev);
2843
2844 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02002846 sco_recv_scodata(conn, skb);
2847 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002849 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002850 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851 }
2852
2853 kfree_skb(skb);
2854}
2855
/* RX work handler: drain hdev->rx_q, mirroring every packet to the
 * monitor (and to raw sockets in promiscuous mode), then dispatching
 * events, ACL and SCO data to their handlers.  Each handler consumes
 * the skb.  Runs from the hdev workqueue.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: user space owns the device, the stack does
		 * not process anything */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}
}
2910
/* CMD work handler: when the controller has command credit
 * (cmd_cnt), send the next queued HCI command, keep a clone in
 * hdev->sent_cmd for the matching Command Complete/Status handler,
 * and (re)arm the command timeout.  Runs from the hdev workqueue.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous command's saved copy */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No reply is expected while resetting, so stop
			 * the watchdog instead of re-arming it */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue and try again later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03002942
2943int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2944{
2945 /* General inquiry access code (GIAC) */
2946 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2947 struct hci_cp_inquiry cp;
2948
2949 BT_DBG("%s", hdev->name);
2950
2951 if (test_bit(HCI_INQUIRY, &hdev->flags))
2952 return -EINPROGRESS;
2953
Johan Hedberg46632622012-01-02 16:06:08 +02002954 inquiry_cache_flush(hdev);
2955
Andre Guedes2519a1f2011-11-07 11:45:24 -03002956 memset(&cp, 0, sizeof(cp));
2957 memcpy(&cp.lap, lap, sizeof(cp.lap));
2958 cp.length = length;
2959
2960 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2961}
Andre Guedes023d50492011-11-04 14:16:52 -03002962
2963int hci_cancel_inquiry(struct hci_dev *hdev)
2964{
2965 BT_DBG("%s", hdev->name);
2966
2967 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03002968 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03002969
2970 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2971}
Andre Guedes31f79562012-04-24 21:02:53 -03002972
2973u8 bdaddr_to_le(u8 bdaddr_type)
2974{
2975 switch (bdaddr_type) {
2976 case BDADDR_LE_PUBLIC:
2977 return ADDR_LE_DEV_PUBLIC;
2978
2979 default:
2980 /* Fallback to LE Random address type */
2981 return ADDR_LE_DEV_RANDOM;
2982 }
2983}