/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

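/* Illustrative use of the asynchronous request API in this file (a sketch,
 * not code taken from this file): callers build a request on the stack,
 * queue one or more commands with hci_req_add() and then submit them with
 * hci_req_run(), optionally passing a completion callback.
 *
 *	struct hci_request req;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	hci_req_run(&req, read_name_complete);
 *
 * The callback name read_name_complete above is hypothetical; any
 * hci_req_complete_t function (or NULL) may be used.
 */
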
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

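/* Illustrative use of the synchronous helpers above (a sketch, not code
 * taken from this file): __hci_cmd_sync() sends a single command, waits
 * for its completion and returns the resulting skb, which the caller
 * owns and must free.
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */
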
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						      unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

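/* Allocate an skb containing a formatted HCI command: the command header
 * (opcode and parameter length) followed by plen bytes of parameters.
 * Returns NULL on allocation failure.
 */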
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID	0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

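/* Synchronize the controller white list with the pending connection and
 * pending report lists. Returns the scan filter policy to use: 0x01 when
 * the white list can be used, 0x00 when scanning must accept all
 * advertising (e.g. too many devices or a device using an RPA).
 */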
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}

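/* Queue the commands needed to (re)enable undirected advertising based on
 * the current advertising instance and the global connectable setting.
 * Any ongoing advertising is disabled first so that a new random address
 * can be programmed if needed.
 */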
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* If instance already has the flags set skip adding it once
	 * again.
	 */
	if (adv_instance && eir_get_data(adv_instance->adv_data,
					 adv_instance->adv_data_len, EIR_FLAGS,
					 NULL))
		goto skip_flags;

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

skip_flags:
	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		__hci_req_update_adv_data(&req, 0x00);
		__hci_req_update_scan_rsp_data(&req, 0x00);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

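/* Queue the commands to start advertising the given instance and schedule
 * its expiry based on the instance duration and remaining lifetime.
 * Returns 0 on success, -EPERM if advertising cannot be started, -EBUSY
 * if another instance timeout is already pending and -ENOENT if the
 * instance does not exist.
 */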
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, instance);
	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_advertising(req);

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

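/* Decide which own address type to use for the next advertising or scan
 * operation and, when a random address is needed, queue the command to set
 * it. require_privacy requests a non-resolvable address for the
 * non-connectable case, use_rpa selects a resolvable private address.
 * The chosen type is returned through own_addr_type.
 */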
1410int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001411 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02001412{
1413 struct hci_dev *hdev = req->hdev;
1414 int err;
1415
1416 /* If privacy is enabled use a resolvable private address. If
1417 * current RPA has expired or there is something else than
1418 * the current RPA in use, then generate a new one.
1419 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001420 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001421 int to;
1422
1423 *own_addr_type = ADDR_LE_DEV_RANDOM;
1424
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001425 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02001426 !bacmp(&hdev->random_addr, &hdev->rpa))
1427 return 0;
1428
1429 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1430 if (err < 0) {
1431 BT_ERR("%s failed to generate new RPA", hdev->name);
1432 return err;
1433 }
1434
1435 set_random_addr(req, &hdev->rpa);
1436
1437 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1438 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1439
1440 return 0;
1441 }
1442
1443 /* In case of required privacy without resolvable private address,
1444 * use an non-resolvable private address. This is useful for active
1445 * scanning and non-connectable advertising.
1446 */
1447 if (require_privacy) {
1448 bdaddr_t nrpa;
1449
1450 while (true) {
1451 /* The non-resolvable private address is generated
1452 * from random six bytes with the two most significant
1453 * bits cleared.
1454 */
1455 get_random_bytes(&nrpa, 6);
1456 nrpa.b[5] &= 0x3f;
1457
1458 /* The non-resolvable private address shall not be
1459 * equal to the public address.
1460 */
1461 if (bacmp(&hdev->bdaddr, &nrpa))
1462 break;
1463 }
1464
1465 *own_addr_type = ADDR_LE_DEV_RANDOM;
1466 set_random_addr(req, &nrpa);
1467 return 0;
1468 }
1469
1470 /* If forcing static address is in use or there is no public
1471 * address use the static address as random address (but skip
1472 * the HCI command if the current random address is already the
1473 * static one.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001474 *
1475 * In case BR/EDR has been disabled on a dual-mode controller
1476 * and a static address has been configured, then use that
1477 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02001478 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07001479 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001480 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001481 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001482 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001483 *own_addr_type = ADDR_LE_DEV_RANDOM;
1484 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1485 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1486 &hdev->static_addr);
1487 return 0;
1488 }
1489
1490 /* Neither privacy nor static address is being used so use a
1491 * public address.
1492 */
1493 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1494
1495 return 0;
1496}
Johan Hedberg2cf22212014-12-19 22:26:00 +02001497
Johan Hedberg405a2612014-12-19 23:18:22 +02001498static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1499{
1500 struct bdaddr_list *b;
1501
1502 list_for_each_entry(b, &hdev->whitelist, list) {
1503 struct hci_conn *conn;
1504
1505 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1506 if (!conn)
1507 return true;
1508
1509 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1510 return true;
1511 }
1512
1513 return false;
1514}
1515
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001516void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02001517{
1518 struct hci_dev *hdev = req->hdev;
1519 u8 scan;
1520
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001521 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02001522 return;
1523
1524 if (!hdev_is_powered(hdev))
1525 return;
1526
1527 if (mgmt_powering_down(hdev))
1528 return;
1529
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001530 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02001531 disconnected_whitelist_entries(hdev))
1532 scan = SCAN_PAGE;
1533 else
1534 scan = SCAN_DISABLED;
1535
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001536 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02001537 scan |= SCAN_INQUIRY;
1538
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001539 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1540 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1541 return;
1542
Johan Hedberg405a2612014-12-19 23:18:22 +02001543 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1544}
1545
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001546static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02001547{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001548 hci_dev_lock(req->hdev);
1549 __hci_req_update_scan(req);
1550 hci_dev_unlock(req->hdev);
1551 return 0;
1552}
Johan Hedberg405a2612014-12-19 23:18:22 +02001553
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001554static void scan_update_work(struct work_struct *work)
1555{
1556 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1557
1558 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02001559}
1560
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001561static int connectable_update(struct hci_request *req, unsigned long opt)
1562{
1563 struct hci_dev *hdev = req->hdev;
1564
1565 hci_dev_lock(hdev);
1566
1567 __hci_req_update_scan(req);
1568
1569 /* If BR/EDR is not enabled and we disable advertising as a
1570 * by-product of disabling connectable, we need to update the
1571 * advertising flags.
1572 */
1573 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02001574 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001575
1576 /* Update the advertising parameters if necessary */
1577 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001578 !list_empty(&hdev->adv_instances))
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001579 __hci_req_enable_advertising(req);
1580
1581 __hci_update_background_scan(req);
1582
1583 hci_dev_unlock(hdev);
1584
1585 return 0;
1586}
1587
1588static void connectable_update_work(struct work_struct *work)
1589{
1590 struct hci_dev *hdev = container_of(work, struct hci_dev,
1591 connectable_update);
1592 u8 status;
1593
1594 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1595 mgmt_set_connectable_complete(hdev, status);
1596}
1597
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02001598static u8 get_service_classes(struct hci_dev *hdev)
1599{
1600 struct bt_uuid *uuid;
1601 u8 val = 0;
1602
1603 list_for_each_entry(uuid, &hdev->uuids, list)
1604 val |= uuid->svc_hint;
1605
1606 return val;
1607}
1608
1609void __hci_req_update_class(struct hci_request *req)
1610{
1611 struct hci_dev *hdev = req->hdev;
1612 u8 cod[3];
1613
1614 BT_DBG("%s", hdev->name);
1615
1616 if (!hdev_is_powered(hdev))
1617 return;
1618
1619 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1620 return;
1621
1622 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1623 return;
1624
1625 cod[0] = hdev->minor_class;
1626 cod[1] = hdev->major_class;
1627 cod[2] = get_service_classes(hdev);
1628
1629 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1630 cod[1] |= 0x20;
1631
1632 if (memcmp(cod, hdev->dev_class, 3) == 0)
1633 return;
1634
1635 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1636}
1637
Johan Hedbergaed1a882015-11-22 17:24:44 +03001638static void write_iac(struct hci_request *req)
1639{
1640 struct hci_dev *hdev = req->hdev;
1641 struct hci_cp_write_current_iac_lap cp;
1642
1643 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1644 return;
1645
1646 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1647 /* Limited discoverable mode */
1648 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1649 cp.iac_lap[0] = 0x00; /* LIAC */
1650 cp.iac_lap[1] = 0x8b;
1651 cp.iac_lap[2] = 0x9e;
1652 cp.iac_lap[3] = 0x33; /* GIAC */
1653 cp.iac_lap[4] = 0x8b;
1654 cp.iac_lap[5] = 0x9e;
1655 } else {
1656 /* General discoverable mode */
1657 cp.num_iac = 1;
1658 cp.iac_lap[0] = 0x33; /* GIAC */
1659 cp.iac_lap[1] = 0x8b;
1660 cp.iac_lap[2] = 0x9e;
1661 }
1662
1663 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1664 (cp.num_iac * 3) + 1, &cp);
1665}
1666
1667static int discoverable_update(struct hci_request *req, unsigned long opt)
1668{
1669 struct hci_dev *hdev = req->hdev;
1670
1671 hci_dev_lock(hdev);
1672
1673 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1674 write_iac(req);
1675 __hci_req_update_scan(req);
1676 __hci_req_update_class(req);
1677 }
1678
1679 /* Advertising instances don't use the global discoverable setting, so
1680 * only update AD if advertising was enabled using Set Advertising.
1681 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001682 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02001683 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03001684
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001685 /* Discoverable mode affects the local advertising
1686 * address in limited privacy mode.
1687 */
1688 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
1689 __hci_req_enable_advertising(req);
1690 }
1691
Johan Hedbergaed1a882015-11-22 17:24:44 +03001692 hci_dev_unlock(hdev);
1693
1694 return 0;
1695}
1696
1697static void discoverable_update_work(struct work_struct *work)
1698{
1699 struct hci_dev *hdev = container_of(work, struct hci_dev,
1700 discoverable_update);
1701 u8 status;
1702
1703 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1704 mgmt_set_discoverable_complete(hdev, status);
1705}
1706
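/* Queue the HCI command appropriate for tearing down @conn in its
 * current state: Disconnect (or Disconnect Physical Link for AMP) for
 * established connections, LE Create Connection Cancel or Create
 * Connection Cancel for outgoing attempts, and Reject Connection
 * Request for incoming ones. Connections in any other state are simply
 * marked as closed.
 */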
Johan Hedbergdcc0f0d2015-10-22 10:49:37 +03001707void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1708 u8 reason)
1709{
1710 switch (conn->state) {
1711 case BT_CONNECTED:
1712 case BT_CONFIG:
1713 if (conn->type == AMP_LINK) {
1714 struct hci_cp_disconn_phy_link cp;
1715
1716 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1717 cp.reason = reason;
1718 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1719 &cp);
1720 } else {
1721 struct hci_cp_disconnect dc;
1722
1723 dc.handle = cpu_to_le16(conn->handle);
1724 dc.reason = reason;
1725 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1726 }
1727
1728 conn->state = BT_DISCONN;
1729
1730 break;
1731 case BT_CONNECT:
1732 if (conn->type == LE_LINK) {
1733 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1734 break;
1735 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1736 0, NULL);
1737 } else if (conn->type == ACL_LINK) {
1738 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1739 break;
1740 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1741 6, &conn->dst);
1742 }
1743 break;
1744 case BT_CONNECT2:
1745 if (conn->type == ACL_LINK) {
1746 struct hci_cp_reject_conn_req rej;
1747
1748 bacpy(&rej.bdaddr, &conn->dst);
1749 rej.reason = reason;
1750
1751 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1752 sizeof(rej), &rej);
1753 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1754 struct hci_cp_reject_sync_conn_req rej;
1755
1756 bacpy(&rej.bdaddr, &conn->dst);
1757
1758 /* SCO rejection has its own limited set of allowed
1759 * error values (0x0D-0x0F) which aren't compatible
1760 * with most values passed to this function. To be
1761 * safe, hard-code one of the values that is
1762 * suitable for SCO.
1763 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02001764 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d2015-10-22 10:49:37 +03001765
1766 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1767 sizeof(rej), &rej);
1768 }
1769 break;
1770 default:
1771 conn->state = BT_CLOSED;
1772 break;
1773 }
1774}
1775
1776static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1777{
1778 if (status)
1779 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1780}
1781
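/* Build and run a request that aborts @conn with the given reason. An
 * empty request (-ENODATA from hci_req_run) is not treated as an
 * error, since __hci_abort_conn may legitimately queue no commands,
 * e.g. for an LE connection that is still only being scanned for.
 */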
1782int hci_abort_conn(struct hci_conn *conn, u8 reason)
1783{
1784 struct hci_request req;
1785 int err;
1786
1787 hci_req_init(&req, conn->hdev);
1788
1789 __hci_abort_conn(&req, conn, reason);
1790
1791 err = hci_req_run(&req, abort_conn_complete);
1792 if (err && err != -ENODATA) {
1793 BT_ERR("Failed to run HCI request: err %d", err);
1794 return err;
1795 }
1796
1797 return 0;
1798}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02001799
Johan Hedberga1d01db2015-11-11 08:11:25 +02001800static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02001801{
1802 hci_dev_lock(req->hdev);
1803 __hci_update_background_scan(req);
1804 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001805 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001806}
1807
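/* Work callback that re-evaluates the passive background scan via a
 * synchronous request. If the request fails, any LE connection still
 * in the BT_CONNECT state is failed with the reported HCI status.
 */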
1808static void bg_scan_update(struct work_struct *work)
1809{
1810 struct hci_dev *hdev = container_of(work, struct hci_dev,
1811 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02001812 struct hci_conn *conn;
1813 u8 status;
1814 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001815
Johan Hedberg84235d22015-11-11 08:11:20 +02001816 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1817 if (!err)
1818 return;
1819
1820 hci_dev_lock(hdev);
1821
1822 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1823 if (conn)
1824 hci_le_conn_failed(conn, status);
1825
1826 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02001827}
1828
Johan Hedberga1d01db2015-11-11 08:11:25 +02001829static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001830{
1831 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001832 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001833}
1834
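/* Flush the inquiry cache and queue an HCI Inquiry command. The LIAC
 * is used when limited discovery was requested, the GIAC otherwise,
 * and @opt carries the inquiry length (in units of 1.28 s as defined
 * by the HCI specification).
 */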
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001835static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1836{
1837 u8 length = opt;
Johan Hedberg78b781c2016-01-05 13:19:32 +02001838 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
1839 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001840 struct hci_cp_inquiry cp;
1841
1842 BT_DBG("%s", req->hdev->name);
1843
1844 hci_dev_lock(req->hdev);
1845 hci_inquiry_cache_flush(req->hdev);
1846 hci_dev_unlock(req->hdev);
1847
1848 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02001849
1850 if (req->hdev->discovery.limited)
1851 memcpy(&cp.lap, liac, sizeof(cp.lap));
1852 else
1853 memcpy(&cp.lap, giac, sizeof(cp.lap));
1854
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001855 cp.length = length;
1856
1857 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1858
1859 return 0;
1860}
1861
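/* Delayed work that runs when the LE scan timeout expires: disable the
 * LE scan and then either stop discovery (LE-only), leave it to the
 * still-running BR/EDR inquiry, or kick off the BR/EDR inquiry phase
 * of an interleaved discovery.
 */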
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001862static void le_scan_disable_work(struct work_struct *work)
1863{
1864 struct hci_dev *hdev = container_of(work, struct hci_dev,
1865 le_scan_disable.work);
1866 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001867
1868 BT_DBG("%s", hdev->name);
1869
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001870 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001871 return;
1872
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001873 cancel_delayed_work(&hdev->le_scan_restart);
1874
1875 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1876 if (status) {
1877 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1878 return;
1879 }
1880
1881 hdev->discovery.scan_start = 0;
1882
1883 /* If we were running an LE-only scan, change the discovery
1884 * state. If we were running both LE and BR/EDR inquiry
1885 * simultaneously, and the BR/EDR inquiry has already finished,
1886 * stop discovery; otherwise the BR/EDR inquiry will stop
1887 * discovery when it finishes. If we are about to resolve a
1888 * remote device name, do not change the discovery state.
1889 */
1890
1891 if (hdev->discovery.type == DISCOV_TYPE_LE)
1892 goto discov_stopped;
1893
1894 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1895 return;
1896
1897 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1898 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1899 hdev->discovery.state != DISCOVERY_RESOLVING)
1900 goto discov_stopped;
1901
1902 return;
1903 }
1904
1905 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1906 HCI_CMD_TIMEOUT, &status);
1907 if (status) {
1908 BT_ERR("Inquiry failed: status 0x%02x", status);
1909 goto discov_stopped;
1910 }
1911
1912 return;
1913
1914discov_stopped:
1915 hci_dev_lock(hdev);
1916 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1917 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001918}
1919
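/* Restart an ongoing LE scan by disabling and immediately re-enabling
 * it. This is used for controllers with a strict duplicate filter, so
 * that devices which were already reported once can be reported again
 * during service discovery.
 */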
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001920static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001921{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001922 struct hci_dev *hdev = req->hdev;
1923 struct hci_cp_le_set_scan_enable cp;
1924
1925 /* If controller is not scanning we are done. */
1926 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1927 return 0;
1928
1929 hci_req_add_le_scan_disable(req);
1930
1931 memset(&cp, 0, sizeof(cp));
1932 cp.enable = LE_SCAN_ENABLE;
1933 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1934 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1935
1936 return 0;
1937}
1938
1939static void le_scan_restart_work(struct work_struct *work)
1940{
1941 struct hci_dev *hdev = container_of(work, struct hci_dev,
1942 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001943 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001944 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001945
1946 BT_DBG("%s", hdev->name);
1947
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001948 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001949 if (status) {
1950 BT_ERR("Failed to restart LE scan: status %d", status);
1951 return;
1952 }
1953
1954 hci_dev_lock(hdev);
1955
1956 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1957 !hdev->discovery.scan_start)
1958 goto unlock;
1959
1960 /* When the scan was started, the le_scan_disable work was queued
1961 * to run 'duration' after scan_start. That work was canceled
1962 * during the scan restart, so queue it again with the remaining
1963 * timeout to make sure the scan does not run indefinitely.
1964 */
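	/* Illustrative numbers only: with a scan_duration of 10240
	 * jiffies and 4000 jiffies already elapsed since scan_start,
	 * the le_scan_disable work is re-queued with a timeout of
	 * 6240 jiffies.
	 */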
1965 duration = hdev->discovery.scan_duration;
1966 scan_start = hdev->discovery.scan_start;
1967 now = jiffies;
1968 if (now - scan_start <= duration) {
1969 int elapsed;
1970
1971 if (now >= scan_start)
1972 elapsed = now - scan_start;
1973 else
1974 elapsed = ULONG_MAX - scan_start + now;
1975
1976 timeout = duration - elapsed;
1977 } else {
1978 timeout = 0;
1979 }
1980
1981 queue_delayed_work(hdev->req_workqueue,
1982 &hdev->le_scan_disable, timeout);
1983
1984unlock:
1985 hci_dev_unlock(hdev);
1986}
1987
Johan Hedberge68f0722015-11-11 08:30:30 +02001988static void disable_advertising(struct hci_request *req)
1989{
1990 u8 enable = 0x00;
1991
1992 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1993}
1994
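/* Set up an active LE scan for discovery, using the scan interval
 * passed in @opt. Advertising and any already-running (background)
 * scan are stopped first, a resolvable or non-resolvable private
 * address is selected as the own address, and the scan parameter and
 * scan enable commands are queued. -EBUSY is returned if an outgoing
 * LE connection attempt is currently using directed advertising.
 */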
1995static int active_scan(struct hci_request *req, unsigned long opt)
1996{
1997 uint16_t interval = opt;
1998 struct hci_dev *hdev = req->hdev;
1999 struct hci_cp_le_set_scan_param param_cp;
2000 struct hci_cp_le_set_scan_enable enable_cp;
2001 u8 own_addr_type;
2002 int err;
2003
2004 BT_DBG("%s", hdev->name);
2005
2006 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2007 hci_dev_lock(hdev);
2008
2009 /* Don't let discovery abort an outgoing connection attempt
2010 * that's using directed advertising.
2011 */
2012 if (hci_lookup_le_connect(hdev)) {
2013 hci_dev_unlock(hdev);
2014 return -EBUSY;
2015 }
2016
2017 cancel_adv_timeout(hdev);
2018 hci_dev_unlock(hdev);
2019
2020 disable_advertising(req);
2021 }
2022
2023 /* If the controller is already scanning, the background scan is
2024 * running. Temporarily stop it so that the discovery scan
2025 * parameters can be set.
2026 */
2027 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2028 hci_req_add_le_scan_disable(req);
2029
2030 /* All active scans will be done with either a resolvable private
2031 * address (when the privacy feature has been enabled) or a
2032 * non-resolvable private address.
2033 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002034 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2035 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002036 if (err < 0)
2037 own_addr_type = ADDR_LE_DEV_PUBLIC;
2038
2039 memset(&param_cp, 0, sizeof(param_cp));
2040 param_cp.type = LE_SCAN_ACTIVE;
2041 param_cp.interval = cpu_to_le16(interval);
2042 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
2043 param_cp.own_address_type = own_addr_type;
2044
2045 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
2046 &param_cp);
2047
2048 memset(&enable_cp, 0, sizeof(enable_cp));
2049 enable_cp.enable = LE_SCAN_ENABLE;
2050 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2051
2052 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
2053 &enable_cp);
2054
2055 return 0;
2056}
2057
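/* Run an active LE scan and a BR/EDR inquiry from a single request,
 * for controllers that can perform both kinds of discovery
 * simultaneously.
 */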
2058static int interleaved_discov(struct hci_request *req, unsigned long opt)
2059{
2060 int err;
2061
2062 BT_DBG("%s", req->hdev->name);
2063
2064 err = active_scan(req, opt);
2065 if (err)
2066 return err;
2067
Johan Hedberg7df26b52015-11-11 12:24:21 +02002068 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002069}
2070
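/* Start the requested type of discovery: BR/EDR inquiry only, LE scan
 * only, or both (simultaneously or interleaved depending on the
 * controller). The resulting HCI status is written to *status, and for
 * LE scanning the le_scan_disable work is queued to enforce the
 * discovery timeout.
 */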
2071static void start_discovery(struct hci_dev *hdev, u8 *status)
2072{
2073 unsigned long timeout;
2074
2075 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2076
2077 switch (hdev->discovery.type) {
2078 case DISCOV_TYPE_BREDR:
2079 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002080 hci_req_sync(hdev, bredr_inquiry,
2081 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002082 status);
2083 return;
2084 case DISCOV_TYPE_INTERLEAVED:
2085 /* When running simultaneous discovery, the LE scanning time
2086 * should occupy the whole discovery time since BR/EDR inquiry
2087 * and LE scanning are scheduled by the controller.
2088 *
2089 * For interleaving discovery in comparison, BR/EDR inquiry
2090 * and LE scanning are done sequentially with separate
2091 * timeouts.
2092 */
2093 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2094 &hdev->quirks)) {
2095 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2096 /* During simultaneous discovery, we double LE scan
2097 * interval. We must leave some time for the controller
2098 * to do BR/EDR inquiry.
2099 */
2100 hci_req_sync(hdev, interleaved_discov,
2101 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2102 status);
2103 break;
2104 }
2105
2106 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2107 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2108 HCI_CMD_TIMEOUT, status);
2109 break;
2110 case DISCOV_TYPE_LE:
2111 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2112 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2113 HCI_CMD_TIMEOUT, status);
2114 break;
2115 default:
2116 *status = HCI_ERROR_UNSPECIFIED;
2117 return;
2118 }
2119
2120 if (*status)
2121 return;
2122
2123 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2124
2125 /* When service discovery is used and the controller has a
2126 * strict duplicate filter, it is important to remember the
2127 * start and duration of the scan. This is required for
2128 * restarting scanning during the discovery phase.
2129 */
2130 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2131 hdev->discovery.result_filtering) {
2132 hdev->discovery.scan_start = jiffies;
2133 hdev->discovery.scan_duration = timeout;
2134 }
2135
2136 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2137 timeout);
2138}
2139
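/* Queue whatever commands are needed to stop the current discovery:
 * Inquiry Cancel and/or LE scan disable while finding, LE scan disable
 * for plain passive scanning, and Remote Name Request Cancel when a
 * name resolution is pending. Returns true when there was an ongoing
 * activity to stop.
 */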
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002140bool hci_req_stop_discovery(struct hci_request *req)
2141{
2142 struct hci_dev *hdev = req->hdev;
2143 struct discovery_state *d = &hdev->discovery;
2144 struct hci_cp_remote_name_req_cancel cp;
2145 struct inquiry_entry *e;
2146 bool ret = false;
2147
2148 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2149
2150 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2151 if (test_bit(HCI_INQUIRY, &hdev->flags))
2152 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2153
2154 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2155 cancel_delayed_work(&hdev->le_scan_disable);
2156 hci_req_add_le_scan_disable(req);
2157 }
2158
2159 ret = true;
2160 } else {
2161 /* Passive scanning */
2162 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2163 hci_req_add_le_scan_disable(req);
2164 ret = true;
2165 }
2166 }
2167
2168 /* No further actions needed for LE-only discovery */
2169 if (d->type == DISCOV_TYPE_LE)
2170 return ret;
2171
2172 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2173 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2174 NAME_PENDING);
2175 if (!e)
2176 return ret;
2177
2178 bacpy(&cp.bdaddr, &e->data.bdaddr);
2179 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2180 &cp);
2181 ret = true;
2182 }
2183
2184 return ret;
2185}
2186
2187static int stop_discovery(struct hci_request *req, unsigned long opt)
2188{
2189 hci_dev_lock(req->hdev);
2190 hci_req_stop_discovery(req);
2191 hci_dev_unlock(req->hdev);
2192
2193 return 0;
2194}
2195
Johan Hedberge68f0722015-11-11 08:30:30 +02002196static void discov_update(struct work_struct *work)
2197{
2198 struct hci_dev *hdev = container_of(work, struct hci_dev,
2199 discov_update);
2200 u8 status = 0;
2201
2202 switch (hdev->discovery.state) {
2203 case DISCOVERY_STARTING:
2204 start_discovery(hdev, &status);
2205 mgmt_start_discovery_complete(hdev, status);
2206 if (status)
2207 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2208 else
2209 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2210 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002211 case DISCOVERY_STOPPING:
2212 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2213 mgmt_stop_discovery_complete(hdev, status);
2214 if (!status)
2215 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2216 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002217 case DISCOVERY_STOPPED:
2218 default:
2219 return;
2220 }
2221}
2222
Johan Hedbergc366f552015-11-23 15:43:06 +02002223static void discov_off(struct work_struct *work)
2224{
2225 struct hci_dev *hdev = container_of(work, struct hci_dev,
2226 discov_off.work);
2227
2228 BT_DBG("%s", hdev->name);
2229
2230 hci_dev_lock(hdev);
2231
2232 /* When the discoverable timeout triggers, just make sure the
2233 * limited discoverable flag is cleared. Even in the case of a
2234 * timeout triggered from general discoverable mode, it is safe
2235 * to unconditionally clear the flag.
2236 */
2237 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2238 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2239 hdev->discov_timeout = 0;
2240
2241 hci_dev_unlock(hdev);
2242
2243 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2244 mgmt_new_settings(hdev);
2245}
2246
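/* Bring the controller in line with the host configuration right after
 * powering on: enable SSP and Secure Connections support where needed,
 * write the LE host support bits, refresh advertising data and
 * (re)enable advertising or scheduled instances, sync the
 * authentication (link security) setting, and update fast connectable,
 * scan mode, class, name and EIR for BR/EDR.
 */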
Johan Hedberg2ff13892015-11-25 16:15:44 +02002247static int powered_update_hci(struct hci_request *req, unsigned long opt)
2248{
2249 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002250 u8 link_sec;
2251
2252 hci_dev_lock(hdev);
2253
2254 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2255 !lmp_host_ssp_capable(hdev)) {
2256 u8 mode = 0x01;
2257
2258 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2259
2260 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2261 u8 support = 0x01;
2262
2263 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2264 sizeof(support), &support);
2265 }
2266 }
2267
2268 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2269 lmp_bredr_capable(hdev)) {
2270 struct hci_cp_write_le_host_supported cp;
2271
2272 cp.le = 0x01;
2273 cp.simul = 0x00;
2274
2275 /* Check first if we already have the right
2276 * host state (host features set)
2277 */
2278 if (cp.le != lmp_host_le_capable(hdev) ||
2279 cp.simul != lmp_host_le_br_capable(hdev))
2280 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2281 sizeof(cp), &cp);
2282 }
2283
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002284 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02002285 /* Make sure the controller has a good default for
2286 * advertising data. This also applies to the case
2287 * where BR/EDR was toggled during the AUTO_OFF phase.
2288 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002289 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2290 list_empty(&hdev->adv_instances)) {
2291 __hci_req_update_adv_data(req, 0x00);
2292 __hci_req_update_scan_rsp_data(req, 0x00);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002293
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002294 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2295 __hci_req_enable_advertising(req);
2296 } else if (!list_empty(&hdev->adv_instances)) {
2297 struct adv_info *adv_instance;
2298
Johan Hedberg2ff13892015-11-25 16:15:44 +02002299 adv_instance = list_first_entry(&hdev->adv_instances,
2300 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002301 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002302 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02002303 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002304 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002305 }
2306
2307 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2308 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2309 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2310 sizeof(link_sec), &link_sec);
2311
2312 if (lmp_bredr_capable(hdev)) {
2313 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2314 __hci_req_write_fast_connectable(req, true);
2315 else
2316 __hci_req_write_fast_connectable(req, false);
2317 __hci_req_update_scan(req);
2318 __hci_req_update_class(req);
2319 __hci_req_update_name(req);
2320 __hci_req_update_eir(req);
2321 }
2322
2323 hci_dev_unlock(hdev);
2324 return 0;
2325}
2326
2327int __hci_req_hci_power_on(struct hci_dev *hdev)
2328{
2329 /* Register the available SMP channels (BR/EDR and LE) only when
2330 * successfully powering on the controller. This late
2331 * registration is required so that LE SMP can clearly decide
2332 * whether the public address or the static address is used.
2333 */
2334 smp_register(hdev);
2335
2336 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2337 NULL);
2338}
2339
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002340void hci_request_setup(struct hci_dev *hdev)
2341{
Johan Hedberge68f0722015-11-11 08:30:30 +02002342 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002343 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002344 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002345 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002346 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02002347 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002348 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2349 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02002350 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002351}
2352
2353void hci_request_cancel_all(struct hci_dev *hdev)
2354{
Johan Hedberg7df0f732015-11-12 15:15:00 +02002355 hci_req_sync_cancel(hdev, ENODEV);
2356
Johan Hedberge68f0722015-11-11 08:30:30 +02002357 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002358 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002359 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002360 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002361 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02002362 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002363 cancel_delayed_work_sync(&hdev->le_scan_disable);
2364 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02002365
2366 if (hdev->adv_instance_timeout) {
2367 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2368 hdev->adv_instance_timeout = 0;
2369 }
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002370}