/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

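/* Illustrative usage (a sketch, not called from this file): build a
 * request, queue one or more commands and hand it over to the command
 * work queue. The pattern below mirrors what hci_req_update_adv_data()
 * further down does.
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	err = hci_req_run(&req, NULL);
 */
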
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

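/* Illustrative usage of __hci_cmd_sync() (a sketch; the __ prefix
 * implies the caller already holds the request sync lock): send one
 * command and wait for its completion event.
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */
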
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

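/* Illustrative usage of hci_req_sync() (a sketch mirroring the
 * update_scan()/scan_update_work() pair below): the builder callback
 * queues commands and the caller blocks until they complete.
 *
 *	static int my_update(struct hci_request *req, unsigned long opt)
 *	{
 *		hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *		return 0;
 *	}
 *
 *	hci_req_sync(hdev, my_update, 0, HCI_CMD_TIMEOUT, &status);
 */
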
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

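/* Each EIR field written above is a (length, type, data) triplet where
 * the length byte covers the type byte plus the payload. For example,
 * a complete local name of "abc" is encoded as:
 *
 *	0x04 EIR_NAME_COMPLETE 'a' 'b' 'c'
 */
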
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of devices is larger than the number of
	 * available white list entries in the controller, then
	 * just abort and return the filter policy value that does
	 * not use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

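/* Summary of the scan filter_policy values produced above and used by
 * hci_req_add_le_passive_scan() below (0x02 is OR'ed in when LE
 * privacy and extended scanner filter policies are both available):
 *
 *	0x00	accept all advertising, white list unused
 *	0x01	accept only advertisers on the white list
 *	0x02	like 0x00, plus directed advertising using RPAs
 *	0x03	like 0x01, plus directed advertising using RPAs
 */
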
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQs are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses, i.e.
	 * LE privacy is enabled, controllers with Extended Scanner Filter
	 * Policies support can additionally handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither applies, default to the global
	 * settings, represented by instance "0".
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return hdev->cur_adv_instance;

	return 0x00;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = get_current_adv_instance(hdev);
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	return adv_instance->scan_rsp_len;
}

static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_scan_rsp_data(req, instance);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

void __hci_req_update_adv_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_adv_data(req, instance);
}

int hci_req_update_adv_data(struct hci_dev *hdev, int instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 instance;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return;

	instance = get_current_adv_instance(hdev);

	hci_req_init(&req, hdev);

	if (instance) {
		__hci_req_schedule_adv_instance(&req, instance, true);
	} else {
		__hci_req_update_adv_data(&req, HCI_ADV_CURRENT);
		__hci_req_update_scan_rsp_data(&req, HCI_ADV_CURRENT);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = get_current_adv_instance(hdev);
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	if (!skb_queue_empty(&req.cmd_q))
		hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, HCI_ADV_CURRENT);
	__hci_req_update_scan_rsp_data(req, HCI_ADV_CURRENT);
	__hci_req_enable_advertising(req);

	return 0;
}

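/* Worked example for the timeout logic above (illustrative numbers):
 * an instance with timeout != 0, duration 2 s and remaining_time 5 s
 * gets timeout = 2 s and its remaining_time drops to 3 s. Once
 * remaining_time (say 1 s) falls below duration, timeout becomes 1 s
 * and remaining_time reaches 0, after which the expiry work removes
 * the instance via hci_req_clear_adv_instance().
 */
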
static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, instance);
		}
	}

	if (list_empty(&hdev->adv_instances)) {
		hdev->cur_adv_instance = 0x00;
		hci_dev_clear_flag(hdev, HCI_ADVERTISING_INSTANCE);
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

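/* Own address selection above, in order of precedence (a summary of
 * the code, not an additional policy):
 *
 *	1. HCI_PRIVACY set	-> random, resolvable private address
 *	2. require_privacy	-> random, non-resolvable private address
 *	3. static address forced, no public address, or BR/EDR off with
 *	   a static address configured
 *				-> random, static address
 *	4. otherwise		-> public address
 */
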
static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING_INSTANCE))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

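/* The LAPs above are written little-endian: 0x00 0x8b 0x9e is the
 * Limited Inquiry Access Code 0x9e8b00 and 0x33 0x8b 0x9e is the
 * General Inquiry Access Code 0x9e8b33.
 */
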
static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		BT_ERR("Failed to run HCI request: err %d", err);
		return err;
	}

	return 0;
}

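/* Illustrative call (a sketch): tearing down a connection on behalf of
 * the user picks a reason code defined in hci.h, e.g.:
 *
 *	hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM);
 */
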
Johan Hedberga1d01db2015-11-11 08:11:25 +02001708static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02001709{
1710 hci_dev_lock(req->hdev);
1711 __hci_update_background_scan(req);
1712 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001713 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001714}
1715
1716static void bg_scan_update(struct work_struct *work)
1717{
1718 struct hci_dev *hdev = container_of(work, struct hci_dev,
1719 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02001720 struct hci_conn *conn;
1721 u8 status;
1722 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001723
Johan Hedberg84235d22015-11-11 08:11:20 +02001724 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1725 if (!err)
1726 return;
1727
1728 hci_dev_lock(hdev);
1729
1730 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1731 if (conn)
1732 hci_le_conn_failed(conn, status);
1733
1734 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02001735}
1736
Johan Hedberga1d01db2015-11-11 08:11:25 +02001737static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001738{
1739 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001740 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001741}
1742
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001743static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1744{
1745 u8 length = opt;
1746 /* General inquiry access code (GIAC) */
1747 u8 lap[3] = { 0x33, 0x8b, 0x9e };
1748 struct hci_cp_inquiry cp;
1749
1750 BT_DBG("%s", req->hdev->name);
1751
1752 hci_dev_lock(req->hdev);
1753 hci_inquiry_cache_flush(req->hdev);
1754 hci_dev_unlock(req->hdev);
1755
1756 memset(&cp, 0, sizeof(cp));
1757 memcpy(&cp.lap, lap, sizeof(cp.lap));
1758 cp.length = length;
1759
1760 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1761
1762 return 0;
1763}
1764
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001765static void le_scan_disable_work(struct work_struct *work)
1766{
1767 struct hci_dev *hdev = container_of(work, struct hci_dev,
1768 le_scan_disable.work);
1769 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001770
1771 BT_DBG("%s", hdev->name);
1772
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001773 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001774 return;
1775
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001776 cancel_delayed_work(&hdev->le_scan_restart);
1777
1778 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1779 if (status) {
1780 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1781 return;
1782 }
1783
1784 hdev->discovery.scan_start = 0;
1785
1786 /* If we were running LE only scan, change discovery state. If
1787 * we were running both LE and BR/EDR inquiry simultaneously,
1788 * and BR/EDR inquiry is already finished, stop discovery,
1789 * otherwise BR/EDR inquiry will stop discovery when finished.
1790 * If we will resolve remote device name, do not change
1791 * discovery state.
1792 */
1793
1794 if (hdev->discovery.type == DISCOV_TYPE_LE)
1795 goto discov_stopped;
1796
1797 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1798 return;
1799
1800 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1801 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1802 hdev->discovery.state != DISCOVERY_RESOLVING)
1803 goto discov_stopped;
1804
1805 return;
1806 }
1807
1808 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1809 HCI_CMD_TIMEOUT, &status);
1810 if (status) {
1811 BT_ERR("Inquiry failed: status 0x%02x", status);
1812 goto discov_stopped;
1813 }
1814
1815 return;
1816
1817discov_stopped:
1818 hci_dev_lock(hdev);
1819 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1820 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001821}
1822
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001823static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001824{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001825 struct hci_dev *hdev = req->hdev;
1826 struct hci_cp_le_set_scan_enable cp;
1827
1828 /* If controller is not scanning we are done. */
1829 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1830 return 0;
1831
1832 hci_req_add_le_scan_disable(req);
1833
1834 memset(&cp, 0, sizeof(cp));
1835 cp.enable = LE_SCAN_ENABLE;
1836 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1837 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1838
1839 return 0;
1840}
1841
static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status 0x%02x", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, hdev->le_scan_disable was queued to
	 * run after 'duration' from scan_start. During the scan restart
	 * this work has been canceled, so it needs to be queued again
	 * with the remaining timeout, to make sure the scan does not run
	 * indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
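	/* Hypothetical numbers to illustrate the wraparound handling
	 * below: with scan_start = ULONG_MAX - 100 and now wrapped
	 * around to 200, the unsigned subtraction now - scan_start
	 * evaluates to 301, the true number of elapsed ticks, so the
	 * comparison with duration stays correct; the else branch then
	 * computes elapsed = 100 + 200 = 300, within one tick of it.
	 */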
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

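/* Queue a command that turns LE advertising off. */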
static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

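/* Request builder for the LE active scan used during discovery: stop
 * advertising and any background scan first, pick a private own
 * address where possible, then program the scan parameters and enable
 * scanning.
 */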
static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If the controller is scanning, it means the background scanning
	 * is running. Thus, we should temporarily stop it in order to set
	 * the discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

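/* Request builder for simultaneous discovery: queue both the LE active
 * scan and the BR/EDR inquiry in one request, for controllers that can
 * run both at the same time.
 */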
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

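/* Run the HCI requests that start discovery for the current discovery
 * type and report the result in *status. For the LE-based types an
 * le_scan_disable work item is queued afterwards to end the scan once
 * the timeout expires.
 */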
static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaving discovery in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

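/* Append the commands needed to stop the current discovery activity
 * (inquiry, LE scanning and/or a pending remote name request) to req.
 * Returns true if at least one command was queued.
 */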
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

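/* hci_req_sync callback that wraps hci_req_stop_discovery() with the
 * device lock held.
 */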
static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

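/* Worker for hdev->discov_update: performs the pending discovery state
 * transition (starting or stopping) and reports the outcome to the
 * management interface.
 */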
static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

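/* Worker for the hdev->discov_off delayed work, run when the
 * discoverable timeout expires.
 */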
static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure the
	 * limited discoverable flag is cleared. Even in the case of a
	 * timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

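/* Initialize the work items used by the request infrastructure;
 * hci_request_cancel_all() below undoes this by cancelling any pending
 * work.
 */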
void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

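/* Cancel all request-related work. hci_req_sync_cancel() first aborts
 * any synchronous request in flight with ENODEV.
 */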
void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}