/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#include "smp.h"
#include "hci_request.h"

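/* Completion states of a synchronous HCI request, tracked in
 * hdev->req_status while such a request is in flight.
 */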
#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

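/* A struct hci_request batches HCI commands so they can be sent to the
 * controller as one unit. Typical usage (illustrative; names are
 * placeholders):
 *
 *        struct hci_request req;
 *
 *        hci_req_init(&req, hdev);
 *        hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *        err = hci_req_run(&req, complete_callback);
 */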
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

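/* Attach the completion callback to the last queued command and splice
 * the request's command queue onto the device's command queue, waking up
 * the command work to start transmission.
 */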
static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

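/* Completion handler used by the synchronous request helpers below. It
 * records the result (and, when available, a reference to the response
 * skb) and wakes up the thread sleeping in __hci_req_sync() or
 * __hci_cmd_sync_ev().
 */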
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

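/* Send a single HCI command and block until the matching completion
 * event arrives or the timeout expires. If @event is non-zero, wait for
 * that specific event instead of the Command Complete event. Returns the
 * event skb on success and an ERR_PTR() value on failure.
 */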
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);
                return ERR_PTR(err);
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

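/* Example usage (illustrative): synchronously read the controller's
 * BD_ADDR and release the response skb afterwards.
 *
 *        skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *                             HCI_CMD_TIMEOUT);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 *        rp = (struct hci_rp_read_bd_addr *)skb->data;
 *        ...
 *        kfree_skb(skb);
 */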
struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                remove_wait_queue(&hdev->req_wait_q, &wait);
                set_current_state(TASK_RUNNING);

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA)
                        return 0;

                return err;
        }

        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

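/* Example usage (illustrative; names are placeholders): a request builder
 * function passed to hci_req_sync(). The builder queues commands on @req
 * and returns 0, or a negative error to abort the request before it is
 * sent.
 *
 *        static int write_scan_enable(struct hci_request *req,
 *                                     unsigned long opt)
 *        {
 *                u8 scan = opt;
 *
 *                hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *                return 0;
 *        }
 *
 *        err = hci_req_sync(hdev, write_scan_enable, SCAN_PAGE,
 *                           HCI_CMD_TIMEOUT, &status);
 */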
int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

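/* Allocate an skb large enough for the HCI command header plus
 * parameters, fill it in and tag it with the packet type and opcode.
 */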
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                BT_ERR("%s no memory for command (opcode 0x%4.4x)",
                       hdev->name, opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_DISABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

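/* Synchronize the controller's white list with the pending connection
 * and pending report lists. Returns the scan filter policy to use:
 * 0x01 if the white list can be used, 0x00 if scanning must accept all
 * advertising reports instead.
 */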
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                struct hci_cp_le_del_from_white_list cp;

                if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                              &b->bdaddr, b->bdaddr_type) ||
                    hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                              &b->bdaddr, b->bdaddr_type)) {
                        white_list_entries++;
                        continue;
                }

                cp.bdaddr_type = b->bdaddr_type;
                bacpy(&cp.bdaddr, &b->bdaddr);

                hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                            sizeof(cp), &cp);
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of devices is larger than the number of
         * available white list entries in the controller, then
         * just abort and return a filter policy value that does
         * not use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list can not be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using resolvable random addresses, i.e.
         * LE privacy is enabled, controllers that support the Extended
         * Scanner Filter Policies feature can additionally handle
         * directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_PASSIVE;
        param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
        param_cp.window = cpu_to_le16(hdev->le_scan_window);
        param_cp.own_address_type = own_addr_type;
        param_cp.filter_policy = filter_policy;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
        struct hci_dev *hdev = req->hdev;

        /* If we're advertising or initiating an LE connection we can't
         * go ahead and change the random address at this time. This is
         * because the eventual initiator address used for the
         * subsequently created connection will be undefined (some
         * controllers use the new address and others the one we had
         * when the operation started).
         *
         * In this kind of scenario skip the update and let the random
         * address be updated at the next cycle.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
            hci_lookup_le_connect(hdev)) {
                BT_DBG("Deferring random address update");
                hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
                return;
        }

        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

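/* Pick the own-address type to use for scanning or advertising, queueing
 * an HCI_OP_LE_SET_RANDOM_ADDR command when needed. The precedence is:
 * a resolvable private address when privacy is enabled, a non-resolvable
 * private address when @require_privacy is set, the configured static
 * address, and finally the public address.
 */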
int hci_update_random_address(struct hci_request *req, bool require_privacy,
                              u8 *own_addr_type)
{
        struct hci_dev *hdev = req->hdev;
        int err;

        /* If privacy is enabled use a resolvable private address. If
         * the current RPA has expired or something other than the
         * current RPA is in use, then generate a new one.
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
                int to;

                *own_addr_type = ADDR_LE_DEV_RANDOM;

                if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
                    !bacmp(&hdev->random_addr, &hdev->rpa))
                        return 0;

                err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
                if (err < 0) {
                        BT_ERR("%s failed to generate new RPA", hdev->name);
                        return err;
                }

                set_random_addr(req, &hdev->rpa);

                to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
                queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

                return 0;
        }

        /* In case of required privacy without a resolvable private
         * address, use a non-resolvable private address. This is useful
         * for active scanning and non-connectable advertising.
         */
        if (require_privacy) {
                bdaddr_t nrpa;

                while (true) {
                        /* The non-resolvable private address is generated
                         * from six random bytes with the two most
                         * significant bits cleared.
                         */
                        get_random_bytes(&nrpa, 6);
                        nrpa.b[5] &= 0x3f;

                        /* The non-resolvable private address shall not be
                         * equal to the public address.
                         */
                        if (bacmp(&hdev->bdaddr, &nrpa))
                                break;
                }

                *own_addr_type = ADDR_LE_DEV_RANDOM;
                set_random_addr(req, &nrpa);
                return 0;
        }

        /* If forcing static address is in use or there is no public
         * address use the static address as random address (but skip
         * the HCI command if the current random address is already the
         * static one).
         *
         * In case BR/EDR has been disabled on a dual-mode controller
         * and a static address has been configured, then use that
         * address instead of the public BR/EDR address.
         */
        if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
            !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
            (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
             bacmp(&hdev->static_addr, BDADDR_ANY))) {
                *own_addr_type = ADDR_LE_DEV_RANDOM;
                if (bacmp(&hdev->static_addr, &hdev->random_addr))
                        hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
                                    &hdev->static_addr);
                return 0;
        }

        /* Neither privacy nor static address is being used so use a
         * public address.
         */
        *own_addr_type = ADDR_LE_DEV_PUBLIC;

        return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->whitelist, list) {
                struct hci_conn *conn;

                conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
                if (!conn)
                        return true;

                if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
                        return true;
        }

        return false;
}

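/* Queue a Write Scan Enable command that enables page scan whenever the
 * device needs to be connectable, i.e. when it is marked connectable or
 * when a whitelisted device is currently disconnected.
 */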
void __hci_update_page_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 scan;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (!hdev_is_powered(hdev))
                return;

        if (mgmt_powering_down(hdev))
                return;

        if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
            disconnected_whitelist_entries(hdev))
                scan = SCAN_PAGE;
        else
                scan = SCAN_DISABLED;

        if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE))
                return;

        if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                scan |= SCAN_INQUIRY;

        hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

void hci_update_page_scan(struct hci_dev *hdev)
{
        struct hci_request req;

        hci_req_init(&req, hdev);
        __hci_update_page_scan(&req);
        hci_req_run(&req, NULL);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections or devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
                      u8 reason)
{
        switch (conn->state) {
        case BT_CONNECTED:
        case BT_CONFIG:
                if (conn->type == AMP_LINK) {
                        struct hci_cp_disconn_phy_link cp;

                        cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
                        cp.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
                                    &cp);
                } else {
                        struct hci_cp_disconnect dc;

                        dc.handle = cpu_to_le16(conn->handle);
                        dc.reason = reason;
                        hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
                }

                conn->state = BT_DISCONN;

                break;
        case BT_CONNECT:
                if (conn->type == LE_LINK) {
                        if (test_bit(HCI_CONN_SCANNING, &conn->flags))
                                break;
                        hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
                                    0, NULL);
                } else if (conn->type == ACL_LINK) {
                        if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
                                break;
                        hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
                                    6, &conn->dst);
                }
                break;
        case BT_CONNECT2:
                if (conn->type == ACL_LINK) {
                        struct hci_cp_reject_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);
                        rej.reason = reason;

                        hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
                                    sizeof(rej), &rej);
                } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
                        struct hci_cp_reject_sync_conn_req rej;

                        bacpy(&rej.bdaddr, &conn->dst);

                        /* SCO rejection has its own limited set of
                         * allowed error values (0x0D-0x0F) which isn't
                         * compatible with most values passed to this
                         * function. To be safe hard-code one of the
                         * values that's suitable for SCO.
                         */
                        rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

                        hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
                                    sizeof(rej), &rej);
                }
                break;
        default:
                conn->state = BT_CLOSED;
                break;
        }
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
        if (status)
                BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
        struct hci_request req;
        int err;

        hci_req_init(&req, conn->hdev);

        __hci_abort_conn(&req, conn, reason);

        err = hci_req_run(&req, abort_conn_complete);
        if (err && err != -ENODATA) {
                BT_ERR("Failed to run HCI request: err %d", err);
                return err;
        }

        return 0;
}

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        __hci_update_background_scan(req);
        hci_dev_unlock(req->hdev);
        return 0;
}

static void bg_scan_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            bg_scan_update);
        struct hci_conn *conn;
        u8 status;
        int err;

        err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
        if (!err)
                return;

        hci_dev_lock(hdev);

        conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
        if (conn)
                hci_le_conn_failed(conn, status);

        hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
        hci_req_add_le_scan_disable(req);
        return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
        u8 length = opt;
        /* General inquiry access code (GIAC) */
        u8 lap[3] = { 0x33, 0x8b, 0x9e };
        struct hci_cp_inquiry cp;

        BT_DBG("%s", req->hdev->name);

        hci_dev_lock(req->hdev);
        hci_inquiry_cache_flush(req->hdev);
        hci_dev_unlock(req->hdev);

        memset(&cp, 0, sizeof(cp));
        memcpy(&cp.lap, lap, sizeof(cp.lap));
        cp.length = length;

        hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

        return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        u8 status;

        BT_DBG("%s", hdev->name);

        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return;

        cancel_delayed_work(&hdev->le_scan_restart);

        hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to disable LE scan: status 0x%02x", status);
                return;
        }

        hdev->discovery.scan_start = 0;

        /* If we were running an LE only scan, change the discovery state.
         * If we were running both LE and BR/EDR inquiry simultaneously,
         * and BR/EDR inquiry is already finished, stop discovery,
         * otherwise BR/EDR inquiry will stop discovery when finished.
         * If we are resolving a remote device name, do not change the
         * discovery state.
         */

        if (hdev->discovery.type == DISCOV_TYPE_LE)
                goto discov_stopped;

        if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
                return;

        if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
                if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
                    hdev->discovery.state != DISCOVERY_RESOLVING)
                        goto discov_stopped;

                return;
        }

        hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
                     HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Inquiry failed: status 0x%02x", status);
                goto discov_stopped;
        }

        return;

discov_stopped:
        hci_dev_lock(hdev);
        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
        hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_enable cp;

        /* If controller is not scanning we are done. */
        if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                return 0;

        hci_req_add_le_scan_disable(req);

        memset(&cp, 0, sizeof(cp));
        cp.enable = LE_SCAN_ENABLE;
        cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

        return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_restart.work);
        unsigned long timeout, duration, scan_start, now;
        u8 status;

        BT_DBG("%s", hdev->name);

        hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
        if (status) {
                BT_ERR("Failed to restart LE scan: status %d", status);
                return;
        }

        hci_dev_lock(hdev);

        if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
            !hdev->discovery.scan_start)
                goto unlock;

        /* When the scan was started, hdev->le_scan_disable has been queued
         * after duration from scan_start. During scan restart this job
         * has been canceled, and we need to queue it again after proper
         * timeout, to make sure that scan does not run indefinitely.
         */
        duration = hdev->discovery.scan_duration;
        scan_start = hdev->discovery.scan_start;
        now = jiffies;
        if (now - scan_start <= duration) {
                int elapsed;

                if (now >= scan_start)
                        elapsed = now - scan_start;
                else
                        elapsed = ULONG_MAX - scan_start + now;

                timeout = duration - elapsed;
        } else {
                timeout = 0;
        }

        queue_delayed_work(hdev->req_workqueue,
                           &hdev->le_scan_disable, timeout);

unlock:
        hci_dev_unlock(hdev);
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
        if (hdev->adv_instance_timeout) {
                hdev->adv_instance_timeout = 0;
                cancel_delayed_work(&hdev->adv_instance_expire);
        }
}

static void disable_advertising(struct hci_request *req)
{
        u8 enable = 0x00;

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

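/* Request builder for active discovery: make sure advertising and any
 * background scan are stopped first, then start an active LE scan with
 * the interval given in @opt.
 */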
static int active_scan(struct hci_request *req, unsigned long opt)
{
        uint16_t interval = opt;
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_scan_param param_cp;
        struct hci_cp_le_set_scan_enable enable_cp;
        u8 own_addr_type;
        int err;

        BT_DBG("%s", hdev->name);

        if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
                hci_dev_lock(hdev);

                /* Don't let discovery abort an outgoing connection attempt
                 * that's using directed advertising.
                 */
                if (hci_lookup_le_connect(hdev)) {
                        hci_dev_unlock(hdev);
                        return -EBUSY;
                }

                cancel_adv_timeout(hdev);
                hci_dev_unlock(hdev);

                disable_advertising(req);
        }

        /* If controller is scanning, it means the background scanning is
         * running. Thus, we should temporarily stop it in order to set the
         * discovery scanning parameters.
         */
        if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                hci_req_add_le_scan_disable(req);

        /* All active scans will be done with either a resolvable private
         * address (when privacy feature has been enabled) or non-resolvable
         * private address.
         */
        err = hci_update_random_address(req, true, &own_addr_type);
        if (err < 0)
                own_addr_type = ADDR_LE_DEV_PUBLIC;

        memset(&param_cp, 0, sizeof(param_cp));
        param_cp.type = LE_SCAN_ACTIVE;
        param_cp.interval = cpu_to_le16(interval);
        param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
        param_cp.own_address_type = own_addr_type;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                    &param_cp);

        memset(&enable_cp, 0, sizeof(enable_cp));
        enable_cp.enable = LE_SCAN_ENABLE;
        enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

        hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                    &enable_cp);

        return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
        int err;

        BT_DBG("%s", req->hdev->name);

        err = active_scan(req, opt);
        if (err)
                return err;

        return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

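/* Run the request(s) matching hdev->discovery.type and, for LE based
 * discovery, schedule the delayed work that will disable LE scanning
 * again once the discovery timeout has passed.
 */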
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
        unsigned long timeout;

        BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

        switch (hdev->discovery.type) {
        case DISCOV_TYPE_BREDR:
                if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
                        hci_req_sync(hdev, bredr_inquiry,
                                     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
                                     status);
                return;
        case DISCOV_TYPE_INTERLEAVED:
                /* When running simultaneous discovery, the LE scanning time
                 * should occupy the whole discovery time since BR/EDR inquiry
                 * and LE scanning are scheduled by the controller.
                 *
                 * For interleaving discovery in comparison, BR/EDR inquiry
                 * and LE scanning are done sequentially with separate
                 * timeouts.
                 */
                if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
                             &hdev->quirks)) {
                        timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                        /* During simultaneous discovery, we double LE scan
                         * interval. We must leave some time for the controller
                         * to do BR/EDR inquiry.
                         */
                        hci_req_sync(hdev, interleaved_discov,
                                     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
                                     status);
                        break;
                }

                timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        case DISCOV_TYPE_LE:
                timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
                hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
                             HCI_CMD_TIMEOUT, status);
                break;
        default:
                *status = HCI_ERROR_UNSPECIFIED;
                return;
        }

        if (*status)
                return;

        BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

        /* When service discovery is used and the controller has a
         * strict duplicate filter, it is important to remember the
         * start and duration of the scan. This is required for
         * restarting scanning during the discovery phase.
         */
        if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
            hdev->discovery.result_filtering) {
                hdev->discovery.scan_start = jiffies;
                hdev->discovery.scan_duration = timeout;
        }

        queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
                           timeout);
}

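/* Queue the commands needed to stop an ongoing discovery: cancel inquiry
 * and/or disable LE scanning, and cancel a pending remote name request.
 * Returns true if at least one command was queued.
 */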
bool hci_req_stop_discovery(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct discovery_state *d = &hdev->discovery;
        struct hci_cp_remote_name_req_cancel cp;
        struct inquiry_entry *e;
        bool ret = false;

        BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

        if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
                if (test_bit(HCI_INQUIRY, &hdev->flags))
                        hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        cancel_delayed_work(&hdev->le_scan_disable);
                        hci_req_add_le_scan_disable(req);
                }

                ret = true;
        } else {
                /* Passive scanning */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
                        hci_req_add_le_scan_disable(req);
                        ret = true;
                }
        }

        /* No further actions needed for LE-only discovery */
        if (d->type == DISCOV_TYPE_LE)
                return ret;

        if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
                e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
                                                     NAME_PENDING);
                if (!e)
                        return ret;

                bacpy(&cp.bdaddr, &e->data.bdaddr);
                hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
                            &cp);
                ret = true;
        }

        return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
        hci_dev_lock(req->hdev);
        hci_req_stop_discovery(req);
        hci_dev_unlock(req->hdev);

        return 0;
}

static void discov_update(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            discov_update);
        u8 status = 0;

        switch (hdev->discovery.state) {
        case DISCOVERY_STARTING:
                start_discovery(hdev, &status);
                mgmt_start_discovery_complete(hdev, status);
                if (status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                else
                        hci_discovery_set_state(hdev, DISCOVERY_FINDING);
                break;
        case DISCOVERY_STOPPING:
                hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
                mgmt_stop_discovery_complete(hdev, status);
                if (!status)
                        hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
                break;
        case DISCOVERY_STOPPED:
        default:
                return;
        }
}

void hci_request_setup(struct hci_dev *hdev)
{
        INIT_WORK(&hdev->discov_update, discov_update);
        INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
        INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
        hci_req_sync_cancel(hdev, ENODEV);

        cancel_work_sync(&hdev->discov_update);
        cancel_work_sync(&hdev->bg_scan_update);
        cancel_delayed_work_sync(&hdev->le_scan_disable);
        cancel_delayed_work_sync(&hdev->le_scan_restart);
}