/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

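/* A request is built up on the stack, filled with one or more commands
 * and then handed over to the hdev command queue in one go. A minimal
 * usage sketch (the opcode is chosen purely for illustration):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
 *	err = hci_req_run(&req, some_complete_cb);
 *
 * where some_complete_cb is a hypothetical hci_req_complete_t callback
 * (same shape as update_background_scan_complete() further down) that
 * runs once the last queued command completes.
 */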
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

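/* Send a single HCI command and block until the matching event arrives,
 * returning the event skb to the caller. A hedged usage sketch (opcode
 * and the HCI_CMD_TIMEOUT constant are illustrative assumptions):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	// ... parse skb->data ...
 *	kfree_skb(skb);
 *
 * Since these helpers share the per-hdev req_status/req_result/req_skb
 * state, callers are expected to serialize requests, e.g. via
 * hci_req_lock() as hci_req_sync() below does.
 */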
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, void (*func)(struct hci_request *req,
						      unsigned long opt),
		   unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, void (*req)(struct hci_request *req,
						   unsigned long opt),
		 unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

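/* On the wire an HCI command is a 3 byte header - a 16-bit little-endian
 * opcode followed by a one byte parameter length (HCI_COMMAND_HDR_SIZE
 * is 3) - and then plen bytes of parameters, so e.g. a 2 byte payload
 * yields a 5 byte packet. hci_prepare_cmd() builds exactly that layout
 * into an skb and additionally stashes the packet type and opcode in the
 * skb control buffer for the transport driver and event handling code.
 */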
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no-longer-valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does
	 * not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

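/* Passive background scanning is (re)programmed in three steps: settle
 * on the own address type, bring the controller white list in sync with
 * the pending connection and report lists, and only then set the scan
 * parameters and enable scanning with the resulting filter policy.
 */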
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

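/* Own address selection, in order of precedence (a short summary of the
 * logic below, not a normative list):
 *
 *	privacy enabled         -> resolvable private address (RPA),
 *				   regenerated once the current one expires
 *	require_privacy set     -> freshly generated non-resolvable
 *				   private address (NRPA)
 *	static address forced,
 *	or no public address    -> configured static random address
 *	otherwise               -> public address
 */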
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without a resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most
			 * significant bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing a static address is in use or there is no public
	 * address, use the static address as the random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor a static address is being used, so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_update_page_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

void hci_update_page_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	hci_req_run(&req, NULL);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If the controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If the controller is connecting, we should not start
		 * scanning since some controllers are not able to scan
		 * and connect at the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If the controller is currently scanning, we stop it to
		 * ensure we don't miss any advertising (due to the
		 * duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

static void update_background_scan_complete(struct hci_dev *hdev, u8 status,
					    u16 opcode)
{
	if (status)
		BT_DBG("HCI request failed to update background scanning: "
		       "status 0x%2.2x", status);
}

void hci_update_background_scan(struct hci_dev *hdev)
{
	int err;
	struct hci_request req;

	hci_req_init(&req, hdev);

	__hci_update_background_scan(&req);

	err = hci_req_run(&req, update_background_scan_complete);
	if (err && err != -ENODATA)
		BT_ERR("Failed to run HCI request: err %d", err);
}

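/* How a connection gets torn down depends on its current state (a rough
 * map of the switch below): established links (BT_CONNECTED/BT_CONFIG)
 * are disconnected, outgoing attempts (BT_CONNECT) are cancelled, and
 * incoming ones (BT_CONNECT2) are rejected.
 */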
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}