/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

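/* Lifecycle of hdev->req_status for the synchronous request machinery
 * below: the waiting thread sets HCI_REQ_PEND before running a request,
 * and either hci_req_sync_complete() or hci_req_sync_cancel() moves it
 * to HCI_REQ_DONE or HCI_REQ_CANCELED before waking hdev->req_wait_q.
 */
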
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

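/* Illustrative sketch (not part of this file) of how the asynchronous
 * request API above is typically used by callers; my_complete_cb is a
 * placeholder name:
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, my_complete_cb);
 *
 * A return of -ENODATA only means that no command ended up being
 * queued, which callers such as hci_abort_conn() below treat as
 * success rather than as an error.
 */
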
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

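/* Illustrative sketch (not part of this file): a single command can be
 * issued synchronously with __hci_cmd_sync(), which returns the
 * Command Complete skb; for example, reading the local version
 * information:
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *
 * The caller owns the returned skb and must kfree_skb() it.
 */
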
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

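/* Allocate an skb carrying a single HCI command: a hci_command_hdr
 * with the opcode in little-endian byte order, followed by plen bytes
 * of parameters. The packet type and opcode are also stashed in the
 * skb control block so that the event processing code can match
 * completion events back to the command.
 */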
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

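/* Synchronize the controller's white list with the pending connection
 * and pending report lists. Returns the scan filter policy to use:
 * 0x00 (accept all advertising) when the white list cannot be relied
 * upon, or 0x01 (white list only) when every device of interest fits
 * in it and none of them uses an RPA.
 */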
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	u8 white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return the filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using a resolvable random address and
	 * LE privacy is thereby enabled, controllers that support the
	 * Extended Scanner Filter Policies can additionally handle
	 * directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

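/* Select the own address type for an upcoming scan or advertising
 * operation, queueing an address update command when needed. The
 * order of preference below is: resolvable private address when
 * privacy is enabled, a freshly generated non-resolvable private
 * address when require_privacy is set, the configured static address,
 * and finally the public address.
 */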
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

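/* Queue the HCI commands needed to tear down a connection based on its
 * current state: disconnect established links, cancel in-progress
 * connection attempts and reject incoming requests that have not been
 * accepted yet. The caller is responsible for running the request.
 */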
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

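/* Build a classic BR/EDR inquiry request using the General Inquiry
 * Access Code. opt carries the inquiry length, which the HCI
 * specification expresses in units of 1.28 seconds.
 */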
static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	/* General inquiry access code (GIAC) */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running LE only scan, change discovery state. If
	 * we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we are about to resolve a remote device name, do not
	 * change the discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable has been queued
	 * after duration from scan_start. During scan restart this job
	 * has been canceled, and we need to queue it again after proper
	 * timeout, to make sure that scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	u16 interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double LE scan
			 * interval. We must leave some time for the controller
			 * to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);
}