/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

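/* Completion callback used by the synchronous request helpers below;
 * it records the result and wakes up the waiting thread.
 */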
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

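/* Send a single HCI command and wait for the response (or, when @event is
 * non-zero, for that specific event). The leading underscores follow the
 * convention used by __hci_req_sync() below: the caller is expected to
 * hold the request lock (see hci_req_sync_lock()).
 */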
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

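/* Allocate an skb containing the HCI command header and parameters */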
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

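/* Synchronize the controller white list with the pending connection and
 * report lists. Returns the scan filter policy to use: 0x01 when the
 * white list can be used, 0x00 when scanning must accept all advertising
 * (e.g. when the list would overflow or an entry uses an RPA).
 */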
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no-longer-valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does
	 * not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list cannot be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ packets are
	 * sent during passive scanning. Not using a non-resolvable
	 * address here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses and
	 * thus has LE privacy enabled, controllers that support the
	 * Extended Scanner Filter Policies can also handle directed
	 * advertising.
	 *
	 * So instead of using filter policies 0x00 (no white list)
	 * and 0x01 (white list enabled) use the new filter policies
	 * 0x02 (no white list) and 0x03 (white list enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

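/* Pick the own address type (and queue a random address update when
 * needed) in the following order of preference: resolvable private
 * address when privacy is enabled, non-resolvable private address when
 * @require_privacy is set, static random address when configured or
 * forced, and the public address otherwise.
 */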
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * the current RPA has expired or something other than the
	 * current RPA is in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case privacy is required without a resolvable private
	 * address, use a non-resolvable private address. This is useful
	 * for active scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most
			 * significant bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing a static address is in use or there is no public
	 * address, use the static address as the random address (but
	 * skip the HCI command if the current random address is already
	 * the static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

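/* Check whether any device on the BR/EDR white list lacks an active
 * connection; used below to decide whether page scan must stay enabled.
 */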
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_update_page_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
		return;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

void hci_update_page_scan(struct hci_dev *hdev)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_update_page_scan(&req);
	hci_req_run(&req, NULL);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we
 * start the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

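/* Queue the HCI command(s) needed to tear down @conn based on its
 * current connection state.
 */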
void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	/* General inquiry access code (GIAC) 0x9E8B33, stored little endian */
	u8 lap[3] = { 0x33, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));
	memcpy(&cp.lap, lap, sizeof(cp.lap));
	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to disable LE scan: status 0x%02x", status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE-only scan, change the discovery state.
	 * If we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery,
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we are resolving a remote device name, do not change the
	 * discovery state.
	 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable was queued
	 * to run 'duration' after scan_start. During the scan restart
	 * this job has been canceled, and we need to queue it again with
	 * the proper timeout to make sure the scan does not run
	 * indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

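		/* Account for jiffies wrapping around between scan_start
		 * and now.
		 */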
		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

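/* Kick off discovery according to hdev->discovery.type, reporting the
 * resulting HCI status back through @status.
 */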
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

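/* Worker that drives the discovery state machine, completing mgmt
 * Start/Stop Discovery operations.
 */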
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);
}