/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

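/* States of a synchronous request, tracked in hdev->req_status while
 * __hci_req_sync() or __hci_cmd_sync_ev() waits on hdev->req_wait_q.
 */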
#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

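/* Hand the accumulated command queue over to the HCI core: attach the
 * completion callback to the last queued command, splice the request's
 * queue onto hdev->cmd_q and kick the command processing work.
 */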
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

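/* Typical build-and-run usage of the asynchronous request API (an
 * illustrative sketch only; the opcode and callback below are
 * placeholders, not taken from this file):
 *
 *	struct hci_request req;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *	err = hci_req_run(&req, reset_complete);
 */
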
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

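/* Example of the synchronous command helper (an illustrative sketch;
 * error handling trimmed and the opcode chosen only for demonstration):
 *
 *	struct sk_buff *skb;
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	... parse skb->data ...
 *	kfree_skb(skb);
 */
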
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

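/* Allocate an skb carrying an HCI command header plus payload. The
 * caller owns the returned skb; NULL is returned on allocation failure.
 */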
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

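/* Fast connectable mode trades power for latency: interlaced page scan
 * with a 160 ms interval instead of the default 1.28 s standard scan.
 */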
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list, so it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

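/* Returns the scanning filter policy to use: 0x01 when the white list
 * was fully programmed and can be used, 0x00 (accept all advertising)
 * when it cannot, e.g. because an entry needs an RPA or the controller
 * white list is too small.
 */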
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return the filter policy value to not use
	 * the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

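/* Queue the commands that program and enable LE scanning, using the
 * extended scan commands when the controller supports them and the
 * legacy Set Scan Parameters/Enable commands otherwise.
 */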
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use extended scanning if the Set Extended Scan Parameters and
	 * Set Extended Scan Enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ PDUs are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using resolvable random addresses with
	 * LE privacy enabled, controllers that support Extended Scanner
	 * Filter Policies can also handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields.
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable bit 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

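/* Program and enable legacy advertising for the current instance,
 * refreshing the random address first when it is safe to do so.
 */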
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

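/* Advertising and scan response payloads are sequences of AD structures:
 * one length octet, one type octet (e.g. EIR_NAME_COMPLETE) and the data
 * itself, which is the layout the eir_append_* helpers emit.
 */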
static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, instance);
	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_advertising(req);

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

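/* Decide which own address type to advertise or scan with, in order of
 * preference: a (re)generated RPA when privacy is in use, a fresh
 * non-resolvable private address when privacy is required without RPAs,
 * the configured static address, and finally the public address.
 */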
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      bool use_rpa, u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (use_rpa) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			bt_dev_err(hdev, "failed to generate new RPA");
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from random six bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

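/* Re-evaluate whether page scan and inquiry scan should be enabled based
 * on the connectable/discoverable settings and the BR/EDR white list.
 */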
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001610void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02001611{
1612 struct hci_dev *hdev = req->hdev;
1613 u8 scan;
1614
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001615 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02001616 return;
1617
1618 if (!hdev_is_powered(hdev))
1619 return;
1620
1621 if (mgmt_powering_down(hdev))
1622 return;
1623
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001624 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02001625 disconnected_whitelist_entries(hdev))
1626 scan = SCAN_PAGE;
1627 else
1628 scan = SCAN_DISABLED;
1629
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001630 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02001631 scan |= SCAN_INQUIRY;
1632
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001633 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1634 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1635 return;
1636
Johan Hedberg405a2612014-12-19 23:18:22 +02001637 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1638}
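
/* Illustrative sketch (not part of the original source): the scan value
 * written above is a bitmask, with SCAN_INQUIRY (0x01) and SCAN_PAGE
 * (0x02) combining into the Write Scan Enable parameter. A simplified,
 * hypothetical mapping (ignoring the whitelist check done above):
 */
#if 0 /* example only */
static u8 example_scan_enable(bool connectable, bool discoverable)
{
	u8 scan = connectable ? SCAN_PAGE : SCAN_DISABLED;

	if (discoverable)
		scan |= SCAN_INQUIRY;

	return scan; /* 0x00, 0x01, 0x02 or 0x03 */
}
#endif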

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, hdev->cur_adv_instance);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}
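
/* Illustrative sketch (not part of the original source): the Class of
 * Device is a 24-bit field sent as three little-endian bytes, so the
 * assembly above puts the minor class in byte 0, the major class in
 * byte 1 and the service class hints in byte 2. Bit 13 of the full
 * field (bit 5 of the middle byte, hence the 0x20) is the Limited
 * Discoverable Mode service class. A hypothetical, compiled-out helper:
 */
#if 0 /* example only */
static void example_build_cod(u8 cod[3], u8 minor, u8 major, u8 services,
			      bool limited)
{
	cod[0] = minor;		/* minor device class */
	cod[1] = major;		/* major device class */
	cod[2] = services;	/* service class hints */

	if (limited)
		cod[1] |= 0x20;	/* Limited Discoverable Mode */
}
#endif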

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}
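
/* Illustrative sketch (not part of the original source): IAC LAPs are
 * 24-bit values transmitted least significant byte first, which is why
 * the GIAC (0x9e8b33) and the LIAC (0x9e8b00) appear byte-reversed
 * above. A hypothetical, compiled-out helper makes the encoding
 * explicit:
 */
#if 0 /* example only */
static void example_store_lap(u8 dst[3], u32 lap)
{
	dst[0] = lap & 0xff;		/* 0x33 for the GIAC */
	dst[1] = (lap >> 8) & 0xff;	/* 0x8b */
	dst[2] = (lap >> 16) & 0xff;	/* 0x9e */
}
#endif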

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
		__hci_req_update_adv_data(req, 0x00);

		/* Discoverable mode affects the local advertising
		 * address in limited privacy mode.
		 */
		if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
			__hci_req_enable_advertising(req);
	}

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
		      u8 reason)
{
	switch (conn->state) {
	case BT_CONNECTED:
	case BT_CONFIG:
		if (conn->type == AMP_LINK) {
			struct hci_cp_disconn_phy_link cp;

			cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
			cp.reason = reason;
			hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
				    &cp);
		} else {
			struct hci_cp_disconnect dc;

			dc.handle = cpu_to_le16(conn->handle);
			dc.reason = reason;
			hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
		}

		conn->state = BT_DISCONN;

		break;
	case BT_CONNECT:
		if (conn->type == LE_LINK) {
			if (test_bit(HCI_CONN_SCANNING, &conn->flags))
				break;
			hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
				    0, NULL);
		} else if (conn->type == ACL_LINK) {
			if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
				break;
			hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
				    6, &conn->dst);
		}
		break;
	case BT_CONNECT2:
		if (conn->type == ACL_LINK) {
			struct hci_cp_reject_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);
			rej.reason = reason;

			hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
				    sizeof(rej), &rej);
		} else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
			struct hci_cp_reject_sync_conn_req rej;

			bacpy(&rej.bdaddr, &conn->dst);

			/* SCO rejection has its own limited set of
			 * allowed error values (0x0D-0x0F) which isn't
			 * compatible with most values passed to this
			 * function. To be safe hard-code one of the
			 * values that's suitable for SCO.
			 */
			rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;

			hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
				    sizeof(rej), &rej);
		}
		break;
	default:
		conn->state = BT_CLOSED;
		break;
	}
}

static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	if (status)
		BT_DBG("Failed to abort connection: status 0x%2.2x", status);
}

int hci_abort_conn(struct hci_conn *conn, u8 reason)
{
	struct hci_request req;
	int err;

	hci_req_init(&req, conn->hdev);

	__hci_abort_conn(&req, conn, reason);

	err = hci_req_run(&req, abort_conn_complete);
	if (err && err != -ENODATA) {
		bt_dev_err(conn->hdev, "failed to run HCI request: err %d",
			   err);
		return err;
	}

	return 0;
}
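
/* Illustrative sketch (not part of the original source): a hypothetical
 * call site, using the common host-initiated disconnect reason
 * HCI_ERROR_REMOTE_USER_TERM (0x13). Compiled out.
 */
#if 0 /* example only */
static void example_disconnect(struct hci_conn *conn)
{
	if (hci_abort_conn(conn, HCI_ERROR_REMOTE_USER_TERM))
		bt_dev_err(conn->hdev, "failed to abort connection");
}
#endif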

static int update_bg_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_update_background_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void bg_scan_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    bg_scan_update);
	struct hci_conn *conn;
	u8 status;
	int err;

	err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
	if (!err)
		return;

	hci_dev_lock(hdev);

	conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
	if (conn)
		hci_le_conn_failed(conn, status);

	hci_dev_unlock(hdev);
}

static int le_scan_disable(struct hci_request *req, unsigned long opt)
{
	hci_req_add_le_scan_disable(req);
	return 0;
}

static int bredr_inquiry(struct hci_request *req, unsigned long opt)
{
	u8 length = opt;
	const u8 giac[3] = { 0x33, 0x8b, 0x9e };
	const u8 liac[3] = { 0x00, 0x8b, 0x9e };
	struct hci_cp_inquiry cp;

	BT_DBG("%s", req->hdev->name);

	hci_dev_lock(req->hdev);
	hci_inquiry_cache_flush(req->hdev);
	hci_dev_unlock(req->hdev);

	memset(&cp, 0, sizeof(cp));

	if (req->hdev->discovery.limited)
		memcpy(&cp.lap, liac, sizeof(cp.lap));
	else
		memcpy(&cp.lap, giac, sizeof(cp.lap));

	cp.length = length;

	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);

	return 0;
}
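
/* Illustrative sketch (not part of the original source): the opt
 * argument of hci_req_sync() is how callers pass the inquiry length
 * (in units of 1.28 s) down to bredr_inquiry(), e.g.:
 *
 *	hci_req_sync(hdev, bredr_inquiry, DISCOV_BREDR_INQUIRY_LEN,
 *		     HCI_CMD_TIMEOUT, &status);
 *
 * which matches the DISCOV_TYPE_BREDR case in start_discovery() below.
 */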

static void le_scan_disable_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_disable.work);
	u8 status;

	BT_DBG("%s", hdev->name);

	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return;

	cancel_delayed_work(&hdev->le_scan_restart);

	hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
			   status);
		return;
	}

	hdev->discovery.scan_start = 0;

	/* If we were running an LE-only scan, change the discovery state.
	 * If we were running both LE and BR/EDR inquiry simultaneously,
	 * and BR/EDR inquiry is already finished, stop discovery;
	 * otherwise BR/EDR inquiry will stop discovery when finished.
	 * If we are resolving a remote device name, do not change the
	 * discovery state.
	 */
	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	/* If the controller is not scanning we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_ENABLE;
		cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		bt_dev_err(hdev, "failed to restart LE scan: status %d",
			   status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, the le_scan_disable work was queued
	 * to run 'duration' after scan_start. That job was canceled for
	 * the scan restart, so queue it again with the remaining timeout
	 * to make sure the scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}
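
/* Worked example (not part of the original source) for the wraparound
 * branch above: with scan_start == ULONG_MAX - 10 and now == 20 after
 * jiffies has wrapped, elapsed = ULONG_MAX - scan_start + now
 * = 10 + 20 = 30 ticks. The single tick consumed by the wrap itself is
 * ignored, which is harmless at jiffies resolution.
 */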

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		__hci_req_disable_advertising(req);
	}

	/* If the controller is scanning, the background scan is running.
	 * Temporarily stop it so the discovery scanning parameters can
	 * be set.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when the privacy feature has been enabled) or a
	 * non-resolvable private address.
	 */
	err = hci_update_random_address(req, true, scan_use_rpa(hdev),
					&own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

	hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
			   own_addr_type, 0);
	return 0;
}

static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * For interleaved discovery, in comparison, BR/EDR inquiry
		 * and LE scanning are done sequentially with separate
		 * timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval. We must leave some time for the
			 * controller to do BR/EDR inquiry.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure
	 * the limited discoverable flag is cleared. Even in the case
	 * of a timeout triggered from general discoverable, it is
	 * safe to unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		    list_empty(&hdev->adv_instances)) {
			__hci_req_update_adv_data(req, 0x00);
			__hci_req_update_scan_rsp_data(req, 0x00);

			if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
				__hci_req_enable_advertising(req);
		} else if (!list_empty(&hdev->adv_instances)) {
			struct adv_info *adv_instance;

			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			__hci_req_schedule_adv_instance(req,
							adv_instance->instance,
							true);
		}
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}
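
/* Illustrative sketch (not part of the original source): other parts of
 * the stack trigger these handlers by queuing the corresponding work
 * item on the request workqueue, e.g. a hypothetical call site:
 *
 *	queue_work(hdev->req_workqueue, &hdev->scan_update);
 */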

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}