blob: 9997c31ef98765fae51eb70f8b8092e98f693fbe [file] [log] [blame]
Johan Hedberg0857dd32014-12-19 13:40:20 +02001/*
2 BlueZ - Bluetooth protocol stack for Linux
3
4 Copyright (C) 2014 Intel Corporation
5
6 This program is free software; you can redistribute it and/or modify
7 it under the terms of the GNU General Public License version 2 as
8 published by the Free Software Foundation;
9
10 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
11 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
12 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
13 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
14 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
15 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18
19 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
20 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
21 SOFTWARE IS DISCLAIMED.
22*/
23
Johan Hedbergb1a89172015-11-25 16:15:42 +020024#include <asm/unaligned.h>
25
Johan Hedberg0857dd32014-12-19 13:40:20 +020026#include <net/bluetooth/bluetooth.h>
27#include <net/bluetooth/hci_core.h>
Johan Hedbergf2252572015-11-18 12:49:20 +020028#include <net/bluetooth/mgmt.h>
Johan Hedberg0857dd32014-12-19 13:40:20 +020029
30#include "smp.h"
31#include "hci_request.h"
32
Johan Hedbergbe91cd02015-11-10 09:44:54 +020033#define HCI_REQ_DONE 0
34#define HCI_REQ_PEND 1
35#define HCI_REQ_CANCELED 2
36
Johan Hedberg0857dd32014-12-19 13:40:20 +020037void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
38{
39 skb_queue_head_init(&req->cmd_q);
40 req->hdev = hdev;
41 req->err = 0;
42}
43
Johan Hedberge62144872015-04-02 13:41:08 +030044static int req_run(struct hci_request *req, hci_req_complete_t complete,
45 hci_req_complete_skb_t complete_skb)
Johan Hedberg0857dd32014-12-19 13:40:20 +020046{
47 struct hci_dev *hdev = req->hdev;
48 struct sk_buff *skb;
49 unsigned long flags;
50
51 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
52
53 /* If an error occurred during request building, remove all HCI
54 * commands queued on the HCI request queue.
55 */
56 if (req->err) {
57 skb_queue_purge(&req->cmd_q);
58 return req->err;
59 }
60
61 /* Do not allow empty requests */
62 if (skb_queue_empty(&req->cmd_q))
63 return -ENODATA;
64
65 skb = skb_peek_tail(&req->cmd_q);
Johan Hedberg44d27132015-11-05 09:31:40 +020066 if (complete) {
67 bt_cb(skb)->hci.req_complete = complete;
68 } else if (complete_skb) {
69 bt_cb(skb)->hci.req_complete_skb = complete_skb;
70 bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
71 }
Johan Hedberg0857dd32014-12-19 13:40:20 +020072
73 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
74 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
75 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
76
77 queue_work(hdev->workqueue, &hdev->cmd_work);
78
79 return 0;
80}
81
Johan Hedberge62144872015-04-02 13:41:08 +030082int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
83{
84 return req_run(req, complete, NULL);
85}
86
87int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
88{
89 return req_run(req, NULL, complete);
90}
91
Johan Hedbergbe91cd02015-11-10 09:44:54 +020092static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
93 struct sk_buff *skb)
94{
95 BT_DBG("%s result 0x%2.2x", hdev->name, result);
96
97 if (hdev->req_status == HCI_REQ_PEND) {
98 hdev->req_result = result;
99 hdev->req_status = HCI_REQ_DONE;
100 if (skb)
101 hdev->req_skb = skb_get(skb);
102 wake_up_interruptible(&hdev->req_wait_q);
103 }
104}
105
Johan Hedbergb5044302015-11-10 09:44:55 +0200106void hci_req_sync_cancel(struct hci_dev *hdev, int err)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200107{
108 BT_DBG("%s err 0x%2.2x", hdev->name, err);
109
110 if (hdev->req_status == HCI_REQ_PEND) {
111 hdev->req_result = err;
112 hdev->req_status = HCI_REQ_CANCELED;
113 wake_up_interruptible(&hdev->req_wait_q);
114 }
115}
116
117struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
118 const void *param, u8 event, u32 timeout)
119{
120 DECLARE_WAITQUEUE(wait, current);
121 struct hci_request req;
122 struct sk_buff *skb;
123 int err = 0;
124
125 BT_DBG("%s", hdev->name);
126
127 hci_req_init(&req, hdev);
128
129 hci_req_add_ev(&req, opcode, plen, param, event);
130
131 hdev->req_status = HCI_REQ_PEND;
132
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
135
136 err = hci_req_run_skb(&req, hci_req_sync_complete);
137 if (err < 0) {
138 remove_wait_queue(&hdev->req_wait_q, &wait);
139 set_current_state(TASK_RUNNING);
140 return ERR_PTR(err);
141 }
142
143 schedule_timeout(timeout);
144
145 remove_wait_queue(&hdev->req_wait_q, &wait);
146
147 if (signal_pending(current))
148 return ERR_PTR(-EINTR);
149
150 switch (hdev->req_status) {
151 case HCI_REQ_DONE:
152 err = -bt_to_errno(hdev->req_result);
153 break;
154
155 case HCI_REQ_CANCELED:
156 err = -hdev->req_result;
157 break;
158
159 default:
160 err = -ETIMEDOUT;
161 break;
162 }
163
164 hdev->req_status = hdev->req_result = 0;
165 skb = hdev->req_skb;
166 hdev->req_skb = NULL;
167
168 BT_DBG("%s end: err %d", hdev->name, err);
169
170 if (err < 0) {
171 kfree_skb(skb);
172 return ERR_PTR(err);
173 }
174
175 if (!skb)
176 return ERR_PTR(-ENODATA);
177
178 return skb;
179}
180EXPORT_SYMBOL(__hci_cmd_sync_ev);
181
182struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
183 const void *param, u32 timeout)
184{
185 return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
186}
187EXPORT_SYMBOL(__hci_cmd_sync);
188
189/* Execute request and wait for completion. */
Johan Hedberga1d01db2015-11-11 08:11:25 +0200190int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
191 unsigned long opt),
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200192 unsigned long opt, u32 timeout, u8 *hci_status)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200193{
194 struct hci_request req;
195 DECLARE_WAITQUEUE(wait, current);
196 int err = 0;
197
198 BT_DBG("%s start", hdev->name);
199
200 hci_req_init(&req, hdev);
201
202 hdev->req_status = HCI_REQ_PEND;
203
Johan Hedberga1d01db2015-11-11 08:11:25 +0200204 err = func(&req, opt);
205 if (err) {
206 if (hci_status)
207 *hci_status = HCI_ERROR_UNSPECIFIED;
208 return err;
209 }
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200210
211 add_wait_queue(&hdev->req_wait_q, &wait);
212 set_current_state(TASK_INTERRUPTIBLE);
213
214 err = hci_req_run_skb(&req, hci_req_sync_complete);
215 if (err < 0) {
216 hdev->req_status = 0;
217
218 remove_wait_queue(&hdev->req_wait_q, &wait);
219 set_current_state(TASK_RUNNING);
220
221 /* ENODATA means the HCI request command queue is empty.
222 * This can happen when a request with conditionals doesn't
223 * trigger any commands to be sent. This is normal behavior
224 * and should not trigger an error return.
225 */
Johan Hedberg568f44f2015-11-23 14:40:47 +0200226 if (err == -ENODATA) {
227 if (hci_status)
228 *hci_status = 0;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200229 return 0;
Johan Hedberg568f44f2015-11-23 14:40:47 +0200230 }
231
232 if (hci_status)
233 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200234
235 return err;
236 }
237
238 schedule_timeout(timeout);
239
240 remove_wait_queue(&hdev->req_wait_q, &wait);
241
242 if (signal_pending(current))
243 return -EINTR;
244
245 switch (hdev->req_status) {
246 case HCI_REQ_DONE:
247 err = -bt_to_errno(hdev->req_result);
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200248 if (hci_status)
249 *hci_status = hdev->req_result;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200250 break;
251
252 case HCI_REQ_CANCELED:
253 err = -hdev->req_result;
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200254 if (hci_status)
255 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200256 break;
257
258 default:
259 err = -ETIMEDOUT;
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200260 if (hci_status)
261 *hci_status = HCI_ERROR_UNSPECIFIED;
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200262 break;
263 }
264
265 hdev->req_status = hdev->req_result = 0;
266
267 BT_DBG("%s end: err %d", hdev->name, err);
268
269 return err;
270}
271
Johan Hedberga1d01db2015-11-11 08:11:25 +0200272int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
273 unsigned long opt),
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200274 unsigned long opt, u32 timeout, u8 *hci_status)
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200275{
276 int ret;
277
278 if (!test_bit(HCI_UP, &hdev->flags))
279 return -ENETDOWN;
280
281 /* Serialize all requests */
Johan Hedbergb5044302015-11-10 09:44:55 +0200282 hci_req_sync_lock(hdev);
Johan Hedberg4ebeee22015-11-11 08:11:19 +0200283 ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
Johan Hedbergb5044302015-11-10 09:44:55 +0200284 hci_req_sync_unlock(hdev);
Johan Hedbergbe91cd02015-11-10 09:44:54 +0200285
286 return ret;
287}
288
Johan Hedberg0857dd32014-12-19 13:40:20 +0200289struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
290 const void *param)
291{
292 int len = HCI_COMMAND_HDR_SIZE + plen;
293 struct hci_command_hdr *hdr;
294 struct sk_buff *skb;
295
296 skb = bt_skb_alloc(len, GFP_ATOMIC);
297 if (!skb)
298 return NULL;
299
300 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
301 hdr->opcode = cpu_to_le16(opcode);
302 hdr->plen = plen;
303
304 if (plen)
305 memcpy(skb_put(skb, plen), param, plen);
306
307 BT_DBG("skb len %d", skb->len);
308
Marcel Holtmannd79f34e2015-11-05 07:10:00 +0100309 hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
310 hci_skb_opcode(skb) = opcode;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200311
312 return skb;
313}
314
315/* Queue a command to an asynchronous HCI request */
316void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
317 const void *param, u8 event)
318{
319 struct hci_dev *hdev = req->hdev;
320 struct sk_buff *skb;
321
322 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
323
324 /* If an error occurred during request building, there is no point in
325 * queueing the HCI command. We can simply return.
326 */
327 if (req->err)
328 return;
329
330 skb = hci_prepare_cmd(hdev, opcode, plen, param);
331 if (!skb) {
332 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
333 hdev->name, opcode);
334 req->err = -ENOMEM;
335 return;
336 }
337
338 if (skb_queue_empty(&req->cmd_q))
Johan Hedberg44d27132015-11-05 09:31:40 +0200339 bt_cb(skb)->hci.req_flags |= HCI_REQ_START;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200340
Marcel Holtmann242c0eb2015-10-25 22:45:53 +0100341 bt_cb(skb)->hci.req_event = event;
Johan Hedberg0857dd32014-12-19 13:40:20 +0200342
343 skb_queue_tail(&req->cmd_q, skb);
344}
345
346void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
347 const void *param)
348{
349 hci_req_add_ev(req, opcode, plen, param, 0);
350}
351
Johan Hedbergbf943cb2015-11-25 16:15:43 +0200352void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
353{
354 struct hci_dev *hdev = req->hdev;
355 struct hci_cp_write_page_scan_activity acp;
356 u8 type;
357
358 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
359 return;
360
361 if (hdev->hci_ver < BLUETOOTH_VER_1_2)
362 return;
363
364 if (enable) {
365 type = PAGE_SCAN_TYPE_INTERLACED;
366
367 /* 160 msec page scan interval */
368 acp.interval = cpu_to_le16(0x0100);
369 } else {
370 type = PAGE_SCAN_TYPE_STANDARD; /* default */
371
372 /* default 1.28 sec page scan */
373 acp.interval = cpu_to_le16(0x0800);
374 }
375
376 acp.window = cpu_to_le16(0x0012);
377
378 if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
379 __cpu_to_le16(hdev->page_scan_window) != acp.window)
380 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
381 sizeof(acp), &acp);
382
383 if (hdev->page_scan_type != type)
384 hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
385}
386
Johan Hedberg196a5e92015-11-22 18:55:44 +0200387/* This function controls the background scanning based on hdev->pend_le_conns
388 * list. If there are pending LE connection we start the background scanning,
389 * otherwise we stop it.
390 *
391 * This function requires the caller holds hdev->lock.
392 */
393static void __hci_update_background_scan(struct hci_request *req)
394{
395 struct hci_dev *hdev = req->hdev;
396
397 if (!test_bit(HCI_UP, &hdev->flags) ||
398 test_bit(HCI_INIT, &hdev->flags) ||
399 hci_dev_test_flag(hdev, HCI_SETUP) ||
400 hci_dev_test_flag(hdev, HCI_CONFIG) ||
401 hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
402 hci_dev_test_flag(hdev, HCI_UNREGISTER))
403 return;
404
405 /* No point in doing scanning if LE support hasn't been enabled */
406 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
407 return;
408
409 /* If discovery is active don't interfere with it */
410 if (hdev->discovery.state != DISCOVERY_STOPPED)
411 return;
412
413 /* Reset RSSI and UUID filters when starting background scanning
414 * since these filters are meant for service discovery only.
415 *
416 * The Start Discovery and Start Service Discovery operations
417 * ensure to set proper values for RSSI threshold and UUID
418 * filter list. So it is safe to just reset them here.
419 */
420 hci_discovery_filter_clear(hdev);
421
422 if (list_empty(&hdev->pend_le_conns) &&
423 list_empty(&hdev->pend_le_reports)) {
424 /* If there is no pending LE connections or devices
425 * to be scanned for, we should stop the background
426 * scanning.
427 */
428
429 /* If controller is not scanning we are done. */
430 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
431 return;
432
433 hci_req_add_le_scan_disable(req);
434
435 BT_DBG("%s stopping background scanning", hdev->name);
436 } else {
437 /* If there is at least one pending LE connection, we should
438 * keep the background scan running.
439 */
440
441 /* If controller is connecting, we should not start scanning
442 * since some controllers are not able to scan and connect at
443 * the same time.
444 */
445 if (hci_lookup_le_connect(hdev))
446 return;
447
448 /* If controller is currently scanning, we stop it to ensure we
449 * don't miss any advertising (due to duplicates filter).
450 */
451 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
452 hci_req_add_le_scan_disable(req);
453
454 hci_req_add_le_passive_scan(req);
455
456 BT_DBG("%s starting background scanning", hdev->name);
457 }
458}
459
Johan Hedberg00cf5042015-11-25 16:15:41 +0200460void __hci_req_update_name(struct hci_request *req)
461{
462 struct hci_dev *hdev = req->hdev;
463 struct hci_cp_write_local_name cp;
464
465 memcpy(cp.name, hdev->dev_name, sizeof(cp.name));
466
467 hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
468}
469
Johan Hedbergb1a89172015-11-25 16:15:42 +0200470#define PNP_INFO_SVCLASS_ID 0x1200
471
472static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
473{
474 u8 *ptr = data, *uuids_start = NULL;
475 struct bt_uuid *uuid;
476
477 if (len < 4)
478 return ptr;
479
480 list_for_each_entry(uuid, &hdev->uuids, list) {
481 u16 uuid16;
482
483 if (uuid->size != 16)
484 continue;
485
486 uuid16 = get_unaligned_le16(&uuid->uuid[12]);
487 if (uuid16 < 0x1100)
488 continue;
489
490 if (uuid16 == PNP_INFO_SVCLASS_ID)
491 continue;
492
493 if (!uuids_start) {
494 uuids_start = ptr;
495 uuids_start[0] = 1;
496 uuids_start[1] = EIR_UUID16_ALL;
497 ptr += 2;
498 }
499
500 /* Stop if not enough space to put next UUID */
501 if ((ptr - data) + sizeof(u16) > len) {
502 uuids_start[1] = EIR_UUID16_SOME;
503 break;
504 }
505
506 *ptr++ = (uuid16 & 0x00ff);
507 *ptr++ = (uuid16 & 0xff00) >> 8;
508 uuids_start[0] += sizeof(uuid16);
509 }
510
511 return ptr;
512}
513
514static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
515{
516 u8 *ptr = data, *uuids_start = NULL;
517 struct bt_uuid *uuid;
518
519 if (len < 6)
520 return ptr;
521
522 list_for_each_entry(uuid, &hdev->uuids, list) {
523 if (uuid->size != 32)
524 continue;
525
526 if (!uuids_start) {
527 uuids_start = ptr;
528 uuids_start[0] = 1;
529 uuids_start[1] = EIR_UUID32_ALL;
530 ptr += 2;
531 }
532
533 /* Stop if not enough space to put next UUID */
534 if ((ptr - data) + sizeof(u32) > len) {
535 uuids_start[1] = EIR_UUID32_SOME;
536 break;
537 }
538
539 memcpy(ptr, &uuid->uuid[12], sizeof(u32));
540 ptr += sizeof(u32);
541 uuids_start[0] += sizeof(u32);
542 }
543
544 return ptr;
545}
546
547static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
548{
549 u8 *ptr = data, *uuids_start = NULL;
550 struct bt_uuid *uuid;
551
552 if (len < 18)
553 return ptr;
554
555 list_for_each_entry(uuid, &hdev->uuids, list) {
556 if (uuid->size != 128)
557 continue;
558
559 if (!uuids_start) {
560 uuids_start = ptr;
561 uuids_start[0] = 1;
562 uuids_start[1] = EIR_UUID128_ALL;
563 ptr += 2;
564 }
565
566 /* Stop if not enough space to put next UUID */
567 if ((ptr - data) + 16 > len) {
568 uuids_start[1] = EIR_UUID128_SOME;
569 break;
570 }
571
572 memcpy(ptr, uuid->uuid, 16);
573 ptr += 16;
574 uuids_start[0] += 16;
575 }
576
577 return ptr;
578}
579
580static void create_eir(struct hci_dev *hdev, u8 *data)
581{
582 u8 *ptr = data;
583 size_t name_len;
584
585 name_len = strlen(hdev->dev_name);
586
587 if (name_len > 0) {
588 /* EIR Data type */
589 if (name_len > 48) {
590 name_len = 48;
591 ptr[1] = EIR_NAME_SHORT;
592 } else
593 ptr[1] = EIR_NAME_COMPLETE;
594
595 /* EIR Data length */
596 ptr[0] = name_len + 1;
597
598 memcpy(ptr + 2, hdev->dev_name, name_len);
599
600 ptr += (name_len + 2);
601 }
602
603 if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
604 ptr[0] = 2;
605 ptr[1] = EIR_TX_POWER;
606 ptr[2] = (u8) hdev->inq_tx_power;
607
608 ptr += 3;
609 }
610
611 if (hdev->devid_source > 0) {
612 ptr[0] = 9;
613 ptr[1] = EIR_DEVICE_ID;
614
615 put_unaligned_le16(hdev->devid_source, ptr + 2);
616 put_unaligned_le16(hdev->devid_vendor, ptr + 4);
617 put_unaligned_le16(hdev->devid_product, ptr + 6);
618 put_unaligned_le16(hdev->devid_version, ptr + 8);
619
620 ptr += 10;
621 }
622
623 ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
624 ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
625 ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
626}
627
628void __hci_req_update_eir(struct hci_request *req)
629{
630 struct hci_dev *hdev = req->hdev;
631 struct hci_cp_write_eir cp;
632
633 if (!hdev_is_powered(hdev))
634 return;
635
636 if (!lmp_ext_inq_capable(hdev))
637 return;
638
639 if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
640 return;
641
642 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
643 return;
644
645 memset(&cp, 0, sizeof(cp));
646
647 create_eir(hdev, cp.data);
648
649 if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
650 return;
651
652 memcpy(hdev->eir, cp.data, sizeof(cp.data));
653
654 hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
655}
656
Johan Hedberg0857dd32014-12-19 13:40:20 +0200657void hci_req_add_le_scan_disable(struct hci_request *req)
658{
659 struct hci_cp_le_set_scan_enable cp;
660
661 memset(&cp, 0, sizeof(cp));
662 cp.enable = LE_SCAN_DISABLE;
663 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
664}
665
666static void add_to_white_list(struct hci_request *req,
667 struct hci_conn_params *params)
668{
669 struct hci_cp_le_add_to_white_list cp;
670
671 cp.bdaddr_type = params->addr_type;
672 bacpy(&cp.bdaddr, &params->addr);
673
674 hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
675}
676
677static u8 update_white_list(struct hci_request *req)
678{
679 struct hci_dev *hdev = req->hdev;
680 struct hci_conn_params *params;
681 struct bdaddr_list *b;
682 uint8_t white_list_entries = 0;
683
684 /* Go through the current white list programmed into the
685 * controller one by one and check if that address is still
686 * in the list of pending connections or list of devices to
687 * report. If not present in either list, then queue the
688 * command to remove it from the controller.
689 */
690 list_for_each_entry(b, &hdev->le_white_list, list) {
691 struct hci_cp_le_del_from_white_list cp;
692
693 if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
694 &b->bdaddr, b->bdaddr_type) ||
695 hci_pend_le_action_lookup(&hdev->pend_le_reports,
696 &b->bdaddr, b->bdaddr_type)) {
697 white_list_entries++;
698 continue;
699 }
700
701 cp.bdaddr_type = b->bdaddr_type;
702 bacpy(&cp.bdaddr, &b->bdaddr);
703
704 hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
705 sizeof(cp), &cp);
706 }
707
708 /* Since all no longer valid white list entries have been
709 * removed, walk through the list of pending connections
710 * and ensure that any new device gets programmed into
711 * the controller.
712 *
713 * If the list of the devices is larger than the list of
714 * available white list entries in the controller, then
715 * just abort and return filer policy value to not use the
716 * white list.
717 */
718 list_for_each_entry(params, &hdev->pend_le_conns, action) {
719 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
720 &params->addr, params->addr_type))
721 continue;
722
723 if (white_list_entries >= hdev->le_white_list_size) {
724 /* Select filter policy to accept all advertising */
725 return 0x00;
726 }
727
728 if (hci_find_irk_by_addr(hdev, &params->addr,
729 params->addr_type)) {
730 /* White list can not be used with RPAs */
731 return 0x00;
732 }
733
734 white_list_entries++;
735 add_to_white_list(req, params);
736 }
737
738 /* After adding all new pending connections, walk through
739 * the list of pending reports and also add these to the
740 * white list if there is still space.
741 */
742 list_for_each_entry(params, &hdev->pend_le_reports, action) {
743 if (hci_bdaddr_list_lookup(&hdev->le_white_list,
744 &params->addr, params->addr_type))
745 continue;
746
747 if (white_list_entries >= hdev->le_white_list_size) {
748 /* Select filter policy to accept all advertising */
749 return 0x00;
750 }
751
752 if (hci_find_irk_by_addr(hdev, &params->addr,
753 params->addr_type)) {
754 /* White list can not be used with RPAs */
755 return 0x00;
756 }
757
758 white_list_entries++;
759 add_to_white_list(req, params);
760 }
761
762 /* Select filter policy to use white list */
763 return 0x01;
764}
765
766void hci_req_add_le_passive_scan(struct hci_request *req)
767{
768 struct hci_cp_le_set_scan_param param_cp;
769 struct hci_cp_le_set_scan_enable enable_cp;
770 struct hci_dev *hdev = req->hdev;
771 u8 own_addr_type;
772 u8 filter_policy;
773
774 /* Set require_privacy to false since no SCAN_REQ are send
775 * during passive scanning. Not using an non-resolvable address
776 * here is important so that peer devices using direct
777 * advertising with our address will be correctly reported
778 * by the controller.
779 */
780 if (hci_update_random_address(req, false, &own_addr_type))
781 return;
782
783 /* Adding or removing entries from the white list must
784 * happen before enabling scanning. The controller does
785 * not allow white list modification while scanning.
786 */
787 filter_policy = update_white_list(req);
788
789 /* When the controller is using random resolvable addresses and
790 * with that having LE privacy enabled, then controllers with
791 * Extended Scanner Filter Policies support can now enable support
792 * for handling directed advertising.
793 *
794 * So instead of using filter polices 0x00 (no whitelist)
795 * and 0x01 (whitelist enabled) use the new filter policies
796 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
797 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -0700798 if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +0200799 (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
800 filter_policy |= 0x02;
801
802 memset(&param_cp, 0, sizeof(param_cp));
803 param_cp.type = LE_SCAN_PASSIVE;
804 param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
805 param_cp.window = cpu_to_le16(hdev->le_scan_window);
806 param_cp.own_address_type = own_addr_type;
807 param_cp.filter_policy = filter_policy;
808 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
809 &param_cp);
810
811 memset(&enable_cp, 0, sizeof(enable_cp));
812 enable_cp.enable = LE_SCAN_ENABLE;
813 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
814 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
815 &enable_cp);
816}
817
Johan Hedbergf2252572015-11-18 12:49:20 +0200818static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
819{
Johan Hedbergcab054a2015-11-30 11:21:45 +0200820 u8 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +0200821 struct adv_info *adv_instance;
822
823 /* Ignore instance 0 */
824 if (instance == 0x00)
825 return 0;
826
827 adv_instance = hci_find_adv_instance(hdev, instance);
828 if (!adv_instance)
829 return 0;
830
831 /* TODO: Take into account the "appearance" and "local-name" flags here.
832 * These are currently being ignored as they are not supported.
833 */
834 return adv_instance->scan_rsp_len;
835}
836
837void __hci_req_disable_advertising(struct hci_request *req)
838{
839 u8 enable = 0x00;
840
841 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
842}
843
844static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
845{
846 u32 flags;
847 struct adv_info *adv_instance;
848
849 if (instance == 0x00) {
850 /* Instance 0 always manages the "Tx Power" and "Flags"
851 * fields
852 */
853 flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;
854
855 /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
856 * corresponds to the "connectable" instance flag.
857 */
858 if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
859 flags |= MGMT_ADV_FLAG_CONNECTABLE;
860
861 return flags;
862 }
863
864 adv_instance = hci_find_adv_instance(hdev, instance);
865
866 /* Return 0 when we got an invalid instance identifier. */
867 if (!adv_instance)
868 return 0;
869
870 return adv_instance->flags;
871}
872
873void __hci_req_enable_advertising(struct hci_request *req)
874{
875 struct hci_dev *hdev = req->hdev;
876 struct hci_cp_le_set_adv_param cp;
877 u8 own_addr_type, enable = 0x01;
878 bool connectable;
Johan Hedbergf2252572015-11-18 12:49:20 +0200879 u32 flags;
880
881 if (hci_conn_num(hdev, LE_LINK) > 0)
882 return;
883
884 if (hci_dev_test_flag(hdev, HCI_LE_ADV))
885 __hci_req_disable_advertising(req);
886
887 /* Clear the HCI_LE_ADV bit temporarily so that the
888 * hci_update_random_address knows that it's safe to go ahead
889 * and write a new random address. The flag will be set back on
890 * as soon as the SET_ADV_ENABLE HCI command completes.
891 */
892 hci_dev_clear_flag(hdev, HCI_LE_ADV);
893
Johan Hedbergcab054a2015-11-30 11:21:45 +0200894 flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);
Johan Hedbergf2252572015-11-18 12:49:20 +0200895
896 /* If the "connectable" instance flag was not set, then choose between
897 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
898 */
899 connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
900 mgmt_get_connectable(hdev);
901
902 /* Set require_privacy to true only when non-connectable
903 * advertising is used. In that case it is fine to use a
904 * non-resolvable private address.
905 */
906 if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
907 return;
908
909 memset(&cp, 0, sizeof(cp));
910 cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
911 cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);
912
913 if (connectable)
914 cp.type = LE_ADV_IND;
915 else if (get_cur_adv_instance_scan_rsp_len(hdev))
916 cp.type = LE_ADV_SCAN_IND;
917 else
918 cp.type = LE_ADV_NONCONN_IND;
919
920 cp.own_address_type = own_addr_type;
921 cp.channel_map = hdev->le_adv_channel_map;
922
923 hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);
924
925 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
926}
927
928static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
929{
930 u8 ad_len = 0;
931 size_t name_len;
932
933 name_len = strlen(hdev->dev_name);
934 if (name_len > 0) {
935 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
936
937 if (name_len > max_len) {
938 name_len = max_len;
939 ptr[1] = EIR_NAME_SHORT;
940 } else
941 ptr[1] = EIR_NAME_COMPLETE;
942
943 ptr[0] = name_len + 1;
944
945 memcpy(ptr + 2, hdev->dev_name, name_len);
946
947 ad_len += (name_len + 2);
948 ptr += (name_len + 2);
949 }
950
951 return ad_len;
952}
953
954static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
955 u8 *ptr)
956{
957 struct adv_info *adv_instance;
958
959 adv_instance = hci_find_adv_instance(hdev, instance);
960 if (!adv_instance)
961 return 0;
962
963 /* TODO: Set the appropriate entries based on advertising instance flags
964 * here once flags other than 0 are supported.
965 */
966 memcpy(ptr, adv_instance->scan_rsp_data,
967 adv_instance->scan_rsp_len);
968
969 return adv_instance->scan_rsp_len;
970}
971
Johan Hedbergcab054a2015-11-30 11:21:45 +0200972void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +0200973{
974 struct hci_dev *hdev = req->hdev;
975 struct hci_cp_le_set_scan_rsp_data cp;
976 u8 len;
977
978 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
979 return;
980
981 memset(&cp, 0, sizeof(cp));
982
983 if (instance)
984 len = create_instance_scan_rsp_data(hdev, instance, cp.data);
985 else
986 len = create_default_scan_rsp_data(hdev, cp.data);
987
988 if (hdev->scan_rsp_data_len == len &&
989 !memcmp(cp.data, hdev->scan_rsp_data, len))
990 return;
991
992 memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
993 hdev->scan_rsp_data_len = len;
994
995 cp.length = len;
996
997 hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
998}
999
Johan Hedbergf2252572015-11-18 12:49:20 +02001000static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
1001{
1002 struct adv_info *adv_instance = NULL;
1003 u8 ad_len = 0, flags = 0;
1004 u32 instance_flags;
1005
1006 /* Return 0 when the current instance identifier is invalid. */
1007 if (instance) {
1008 adv_instance = hci_find_adv_instance(hdev, instance);
1009 if (!adv_instance)
1010 return 0;
1011 }
1012
1013 instance_flags = get_adv_instance_flags(hdev, instance);
1014
1015 /* The Add Advertising command allows userspace to set both the general
1016 * and limited discoverable flags.
1017 */
1018 if (instance_flags & MGMT_ADV_FLAG_DISCOV)
1019 flags |= LE_AD_GENERAL;
1020
1021 if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
1022 flags |= LE_AD_LIMITED;
1023
1024 if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
1025 /* If a discovery flag wasn't provided, simply use the global
1026 * settings.
1027 */
1028 if (!flags)
1029 flags |= mgmt_get_adv_discov_flags(hdev);
1030
1031 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1032 flags |= LE_AD_NO_BREDR;
1033
1034 /* If flags would still be empty, then there is no need to
1035 * include the "Flags" AD field".
1036 */
1037 if (flags) {
1038 ptr[0] = 0x02;
1039 ptr[1] = EIR_FLAGS;
1040 ptr[2] = flags;
1041
1042 ad_len += 3;
1043 ptr += 3;
1044 }
1045 }
1046
1047 if (adv_instance) {
1048 memcpy(ptr, adv_instance->adv_data,
1049 adv_instance->adv_data_len);
1050 ad_len += adv_instance->adv_data_len;
1051 ptr += adv_instance->adv_data_len;
1052 }
1053
1054 /* Provide Tx Power only if we can provide a valid value for it */
1055 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
1056 (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
1057 ptr[0] = 0x02;
1058 ptr[1] = EIR_TX_POWER;
1059 ptr[2] = (u8)hdev->adv_tx_power;
1060
1061 ad_len += 3;
1062 ptr += 3;
1063 }
1064
1065 return ad_len;
1066}
1067
Johan Hedbergcab054a2015-11-30 11:21:45 +02001068void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001069{
1070 struct hci_dev *hdev = req->hdev;
1071 struct hci_cp_le_set_adv_data cp;
1072 u8 len;
1073
1074 if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
1075 return;
1076
1077 memset(&cp, 0, sizeof(cp));
1078
1079 len = create_instance_adv_data(hdev, instance, cp.data);
1080
1081 /* There's nothing to do if the data hasn't changed */
1082 if (hdev->adv_data_len == len &&
1083 memcmp(cp.data, hdev->adv_data, len) == 0)
1084 return;
1085
1086 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
1087 hdev->adv_data_len = len;
1088
1089 cp.length = len;
1090
1091 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
1092}
1093
Johan Hedbergcab054a2015-11-30 11:21:45 +02001094int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
Johan Hedbergf2252572015-11-18 12:49:20 +02001095{
1096 struct hci_request req;
1097
1098 hci_req_init(&req, hdev);
1099 __hci_req_update_adv_data(&req, instance);
1100
1101 return hci_req_run(&req, NULL);
1102}
1103
1104static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1105{
1106 BT_DBG("%s status %u", hdev->name, status);
1107}
1108
1109void hci_req_reenable_advertising(struct hci_dev *hdev)
1110{
1111 struct hci_request req;
Johan Hedbergf2252572015-11-18 12:49:20 +02001112
1113 if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001114 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001115 return;
1116
Johan Hedbergf2252572015-11-18 12:49:20 +02001117 hci_req_init(&req, hdev);
1118
Johan Hedbergcab054a2015-11-30 11:21:45 +02001119 if (hdev->cur_adv_instance) {
1120 __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
1121 true);
Johan Hedbergf2252572015-11-18 12:49:20 +02001122 } else {
Johan Hedbergcab054a2015-11-30 11:21:45 +02001123 __hci_req_update_adv_data(&req, 0x00);
1124 __hci_req_update_scan_rsp_data(&req, 0x00);
Johan Hedbergf2252572015-11-18 12:49:20 +02001125 __hci_req_enable_advertising(&req);
1126 }
1127
1128 hci_req_run(&req, adv_enable_complete);
1129}
1130
1131static void adv_timeout_expire(struct work_struct *work)
1132{
1133 struct hci_dev *hdev = container_of(work, struct hci_dev,
1134 adv_instance_expire.work);
1135
1136 struct hci_request req;
1137 u8 instance;
1138
1139 BT_DBG("%s", hdev->name);
1140
1141 hci_dev_lock(hdev);
1142
1143 hdev->adv_instance_timeout = 0;
1144
Johan Hedbergcab054a2015-11-30 11:21:45 +02001145 instance = hdev->cur_adv_instance;
Johan Hedbergf2252572015-11-18 12:49:20 +02001146 if (instance == 0x00)
1147 goto unlock;
1148
1149 hci_req_init(&req, hdev);
1150
1151 hci_req_clear_adv_instance(hdev, &req, instance, false);
1152
1153 if (list_empty(&hdev->adv_instances))
1154 __hci_req_disable_advertising(&req);
1155
Johan Hedberg550a8ca2015-11-27 11:11:52 +02001156 hci_req_run(&req, NULL);
Johan Hedbergf2252572015-11-18 12:49:20 +02001157
1158unlock:
1159 hci_dev_unlock(hdev);
1160}
1161
1162int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
1163 bool force)
1164{
1165 struct hci_dev *hdev = req->hdev;
1166 struct adv_info *adv_instance = NULL;
1167 u16 timeout;
1168
1169 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001170 list_empty(&hdev->adv_instances))
Johan Hedbergf2252572015-11-18 12:49:20 +02001171 return -EPERM;
1172
1173 if (hdev->adv_instance_timeout)
1174 return -EBUSY;
1175
1176 adv_instance = hci_find_adv_instance(hdev, instance);
1177 if (!adv_instance)
1178 return -ENOENT;
1179
1180 /* A zero timeout means unlimited advertising. As long as there is
1181 * only one instance, duration should be ignored. We still set a timeout
1182 * in case further instances are being added later on.
1183 *
1184 * If the remaining lifetime of the instance is more than the duration
1185 * then the timeout corresponds to the duration, otherwise it will be
1186 * reduced to the remaining instance lifetime.
1187 */
1188 if (adv_instance->timeout == 0 ||
1189 adv_instance->duration <= adv_instance->remaining_time)
1190 timeout = adv_instance->duration;
1191 else
1192 timeout = adv_instance->remaining_time;
1193
1194 /* The remaining time is being reduced unless the instance is being
1195 * advertised without time limit.
1196 */
1197 if (adv_instance->timeout)
1198 adv_instance->remaining_time =
1199 adv_instance->remaining_time - timeout;
1200
1201 hdev->adv_instance_timeout = timeout;
1202 queue_delayed_work(hdev->req_workqueue,
1203 &hdev->adv_instance_expire,
1204 msecs_to_jiffies(timeout * 1000));
1205
1206 /* If we're just re-scheduling the same instance again then do not
1207 * execute any HCI commands. This happens when a single instance is
1208 * being advertised.
1209 */
1210 if (!force && hdev->cur_adv_instance == instance &&
1211 hci_dev_test_flag(hdev, HCI_LE_ADV))
1212 return 0;
1213
1214 hdev->cur_adv_instance = instance;
Johan Hedbergcab054a2015-11-30 11:21:45 +02001215 __hci_req_update_adv_data(req, instance);
1216 __hci_req_update_scan_rsp_data(req, instance);
Johan Hedbergf2252572015-11-18 12:49:20 +02001217 __hci_req_enable_advertising(req);
1218
1219 return 0;
1220}
1221
1222static void cancel_adv_timeout(struct hci_dev *hdev)
1223{
1224 if (hdev->adv_instance_timeout) {
1225 hdev->adv_instance_timeout = 0;
1226 cancel_delayed_work(&hdev->adv_instance_expire);
1227 }
1228}
1229
1230/* For a single instance:
1231 * - force == true: The instance will be removed even when its remaining
1232 * lifetime is not zero.
1233 * - force == false: the instance will be deactivated but kept stored unless
1234 * the remaining lifetime is zero.
1235 *
1236 * For instance == 0x00:
1237 * - force == true: All instances will be removed regardless of their timeout
1238 * setting.
1239 * - force == false: Only instances that have a timeout will be removed.
1240 */
1241void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
1242 u8 instance, bool force)
1243{
1244 struct adv_info *adv_instance, *n, *next_instance = NULL;
1245 int err;
1246 u8 rem_inst;
1247
1248 /* Cancel any timeout concerning the removed instance(s). */
1249 if (!instance || hdev->cur_adv_instance == instance)
1250 cancel_adv_timeout(hdev);
1251
1252 /* Get the next instance to advertise BEFORE we remove
1253 * the current one. This can be the same instance again
1254 * if there is only one instance.
1255 */
1256 if (instance && hdev->cur_adv_instance == instance)
1257 next_instance = hci_get_next_instance(hdev, instance);
1258
1259 if (instance == 0x00) {
1260 list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
1261 list) {
1262 if (!(force || adv_instance->timeout))
1263 continue;
1264
1265 rem_inst = adv_instance->instance;
1266 err = hci_remove_adv_instance(hdev, rem_inst);
1267 if (!err)
1268 mgmt_advertising_removed(NULL, hdev, rem_inst);
1269 }
Johan Hedbergf2252572015-11-18 12:49:20 +02001270 } else {
1271 adv_instance = hci_find_adv_instance(hdev, instance);
1272
1273 if (force || (adv_instance && adv_instance->timeout &&
1274 !adv_instance->remaining_time)) {
1275 /* Don't advertise a removed instance. */
1276 if (next_instance &&
1277 next_instance->instance == instance)
1278 next_instance = NULL;
1279
1280 err = hci_remove_adv_instance(hdev, instance);
1281 if (!err)
1282 mgmt_advertising_removed(NULL, hdev, instance);
1283 }
1284 }
1285
Johan Hedbergf2252572015-11-18 12:49:20 +02001286 if (!req || !hdev_is_powered(hdev) ||
1287 hci_dev_test_flag(hdev, HCI_ADVERTISING))
1288 return;
1289
1290 if (next_instance)
1291 __hci_req_schedule_adv_instance(req, next_instance->instance,
1292 false);
1293}
1294
Johan Hedberg0857dd32014-12-19 13:40:20 +02001295static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1296{
1297 struct hci_dev *hdev = req->hdev;
1298
1299 /* If we're advertising or initiating an LE connection we can't
1300 * go ahead and change the random address at this time. This is
1301 * because the eventual initiator address used for the
1302 * subsequently created connection will be undefined (some
1303 * controllers use the new address and others the one we had
1304 * when the operation started).
1305 *
1306 * In this kind of scenario skip the update and let the random
1307 * address be updated at the next cycle.
1308 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001309 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02001310 hci_lookup_le_connect(hdev)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001311 BT_DBG("Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001312 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02001313 return;
1314 }
1315
1316 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1317}
1318
1319int hci_update_random_address(struct hci_request *req, bool require_privacy,
1320 u8 *own_addr_type)
1321{
1322 struct hci_dev *hdev = req->hdev;
1323 int err;
1324
1325 /* If privacy is enabled use a resolvable private address. If
1326 * current RPA has expired or there is something else than
1327 * the current RPA in use, then generate a new one.
1328 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001329 if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001330 int to;
1331
1332 *own_addr_type = ADDR_LE_DEV_RANDOM;
1333
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001334 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02001335 !bacmp(&hdev->random_addr, &hdev->rpa))
1336 return 0;
1337
1338 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1339 if (err < 0) {
1340 BT_ERR("%s failed to generate new RPA", hdev->name);
1341 return err;
1342 }
1343
1344 set_random_addr(req, &hdev->rpa);
1345
1346 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1347 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1348
1349 return 0;
1350 }
1351
1352 /* In case of required privacy without resolvable private address,
1353 * use an non-resolvable private address. This is useful for active
1354 * scanning and non-connectable advertising.
1355 */
1356 if (require_privacy) {
1357 bdaddr_t nrpa;
1358
1359 while (true) {
1360 /* The non-resolvable private address is generated
1361 * from random six bytes with the two most significant
1362 * bits cleared.
1363 */
1364 get_random_bytes(&nrpa, 6);
1365 nrpa.b[5] &= 0x3f;
1366
1367 /* The non-resolvable private address shall not be
1368 * equal to the public address.
1369 */
1370 if (bacmp(&hdev->bdaddr, &nrpa))
1371 break;
1372 }
1373
1374 *own_addr_type = ADDR_LE_DEV_RANDOM;
1375 set_random_addr(req, &nrpa);
1376 return 0;
1377 }
1378
1379 /* If forcing static address is in use or there is no public
1380 * address use the static address as random address (but skip
1381 * the HCI command if the current random address is already the
1382 * static one.
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001383 *
1384 * In case BR/EDR has been disabled on a dual-mode controller
1385 * and a static address has been configured, then use that
1386 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02001387 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07001388 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001389 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001390 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001391 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001392 *own_addr_type = ADDR_LE_DEV_RANDOM;
1393 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1394 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1395 &hdev->static_addr);
1396 return 0;
1397 }
1398
1399 /* Neither privacy nor static address is being used so use a
1400 * public address.
1401 */
1402 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1403
1404 return 0;
1405}
Johan Hedberg2cf22212014-12-19 22:26:00 +02001406
Johan Hedberg405a2612014-12-19 23:18:22 +02001407static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1408{
1409 struct bdaddr_list *b;
1410
1411 list_for_each_entry(b, &hdev->whitelist, list) {
1412 struct hci_conn *conn;
1413
1414 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1415 if (!conn)
1416 return true;
1417
1418 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1419 return true;
1420 }
1421
1422 return false;
1423}
1424
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001425void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02001426{
1427 struct hci_dev *hdev = req->hdev;
1428 u8 scan;
1429
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001430 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02001431 return;
1432
1433 if (!hdev_is_powered(hdev))
1434 return;
1435
1436 if (mgmt_powering_down(hdev))
1437 return;
1438
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001439 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02001440 disconnected_whitelist_entries(hdev))
1441 scan = SCAN_PAGE;
1442 else
1443 scan = SCAN_DISABLED;
1444
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001445 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02001446 scan |= SCAN_INQUIRY;
1447
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001448 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1449 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1450 return;
1451
Johan Hedberg405a2612014-12-19 23:18:22 +02001452 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1453}
1454
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001455static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02001456{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001457 hci_dev_lock(req->hdev);
1458 __hci_req_update_scan(req);
1459 hci_dev_unlock(req->hdev);
1460 return 0;
1461}
Johan Hedberg405a2612014-12-19 23:18:22 +02001462
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001463static void scan_update_work(struct work_struct *work)
1464{
1465 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1466
1467 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02001468}
1469
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001470static int connectable_update(struct hci_request *req, unsigned long opt)
1471{
1472 struct hci_dev *hdev = req->hdev;
1473
1474 hci_dev_lock(hdev);
1475
1476 __hci_req_update_scan(req);
1477
1478 /* If BR/EDR is not enabled and we disable advertising as a
1479 * by-product of disabling connectable, we need to update the
1480 * advertising flags.
1481 */
1482 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02001483 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001484
1485 /* Update the advertising parameters if necessary */
1486 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Johan Hedberg17fd08f2015-11-26 12:15:59 +02001487 !list_empty(&hdev->adv_instances))
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001488 __hci_req_enable_advertising(req);
1489
1490 __hci_update_background_scan(req);
1491
1492 hci_dev_unlock(hdev);
1493
1494 return 0;
1495}
1496
1497static void connectable_update_work(struct work_struct *work)
1498{
1499 struct hci_dev *hdev = container_of(work, struct hci_dev,
1500 connectable_update);
1501 u8 status;
1502
1503 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1504 mgmt_set_connectable_complete(hdev, status);
1505}
1506
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02001507static u8 get_service_classes(struct hci_dev *hdev)
1508{
1509 struct bt_uuid *uuid;
1510 u8 val = 0;
1511
1512 list_for_each_entry(uuid, &hdev->uuids, list)
1513 val |= uuid->svc_hint;
1514
1515 return val;
1516}
1517
1518void __hci_req_update_class(struct hci_request *req)
1519{
1520 struct hci_dev *hdev = req->hdev;
1521 u8 cod[3];
1522
1523 BT_DBG("%s", hdev->name);
1524
1525 if (!hdev_is_powered(hdev))
1526 return;
1527
1528 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1529 return;
1530
1531 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1532 return;
1533
1534 cod[0] = hdev->minor_class;
1535 cod[1] = hdev->major_class;
1536 cod[2] = get_service_classes(hdev);
1537
1538 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1539 cod[1] |= 0x20;
1540
1541 if (memcmp(cod, hdev->dev_class, 3) == 0)
1542 return;
1543
1544 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1545}
1546
Johan Hedbergaed1a882015-11-22 17:24:44 +03001547static void write_iac(struct hci_request *req)
1548{
1549 struct hci_dev *hdev = req->hdev;
1550 struct hci_cp_write_current_iac_lap cp;
1551
1552 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1553 return;
1554
1555 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1556 /* Limited discoverable mode */
1557 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1558 cp.iac_lap[0] = 0x00; /* LIAC */
1559 cp.iac_lap[1] = 0x8b;
1560 cp.iac_lap[2] = 0x9e;
1561 cp.iac_lap[3] = 0x33; /* GIAC */
1562 cp.iac_lap[4] = 0x8b;
1563 cp.iac_lap[5] = 0x9e;
1564 } else {
1565 /* General discoverable mode */
1566 cp.num_iac = 1;
1567 cp.iac_lap[0] = 0x33; /* GIAC */
1568 cp.iac_lap[1] = 0x8b;
1569 cp.iac_lap[2] = 0x9e;
1570 }
1571
1572 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1573 (cp.num_iac * 3) + 1, &cp);
1574}
1575
1576static int discoverable_update(struct hci_request *req, unsigned long opt)
1577{
1578 struct hci_dev *hdev = req->hdev;
1579
1580 hci_dev_lock(hdev);
1581
1582 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1583 write_iac(req);
1584 __hci_req_update_scan(req);
1585 __hci_req_update_class(req);
1586 }
1587
1588 /* Advertising instances don't use the global discoverable setting, so
1589 * only update AD if advertising was enabled using Set Advertising.
1590 */
1591 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
Johan Hedbergcab054a2015-11-30 11:21:45 +02001592 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03001593
1594 hci_dev_unlock(hdev);
1595
1596 return 0;
1597}
1598
1599static void discoverable_update_work(struct work_struct *work)
1600{
1601 struct hci_dev *hdev = container_of(work, struct hci_dev,
1602 discoverable_update);
1603 u8 status;
1604
1605 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1606 mgmt_set_discoverable_complete(hdev, status);
1607}
1608
Johan Hedbergdcc0f0d2015-10-22 10:49:37 +03001609void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1610 u8 reason)
1611{
1612 switch (conn->state) {
1613 case BT_CONNECTED:
1614 case BT_CONFIG:
1615 if (conn->type == AMP_LINK) {
1616 struct hci_cp_disconn_phy_link cp;
1617
1618 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1619 cp.reason = reason;
1620 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1621 &cp);
1622 } else {
1623 struct hci_cp_disconnect dc;
1624
1625 dc.handle = cpu_to_le16(conn->handle);
1626 dc.reason = reason;
1627 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1628 }
1629
1630 conn->state = BT_DISCONN;
1631
1632 break;
1633 case BT_CONNECT:
1634 if (conn->type == LE_LINK) {
1635 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1636 break;
1637 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1638 0, NULL);
1639 } else if (conn->type == ACL_LINK) {
1640 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1641 break;
1642 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1643 6, &conn->dst);
1644 }
1645 break;
1646 case BT_CONNECT2:
1647 if (conn->type == ACL_LINK) {
1648 struct hci_cp_reject_conn_req rej;
1649
1650 bacpy(&rej.bdaddr, &conn->dst);
1651 rej.reason = reason;
1652
1653 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1654 sizeof(rej), &rej);
1655 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1656 struct hci_cp_reject_sync_conn_req rej;
1657
1658 bacpy(&rej.bdaddr, &conn->dst);
1659
1660 /* SCO rejection has its own limited set of
1661 * allowed error values (0x0D-0x0F) which isn't
1662 * compatible with most values passed to this
1663 * function. To be safe hard-code one of the
1664 * values that's suitable for SCO.
1665 */
1666 rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
1667
1668 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1669 sizeof(rej), &rej);
1670 }
1671 break;
1672 default:
1673 conn->state = BT_CLOSED;
1674 break;
1675 }
1676}
1677
1678static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1679{
1680 if (status)
1681 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1682}
1683
1684int hci_abort_conn(struct hci_conn *conn, u8 reason)
1685{
1686 struct hci_request req;
1687 int err;
1688
1689 hci_req_init(&req, conn->hdev);
1690
1691 __hci_abort_conn(&req, conn, reason);
1692
1693 err = hci_req_run(&req, abort_conn_complete);
1694 if (err && err != -ENODATA) {
1695 BT_ERR("Failed to run HCI request: err %d", err);
1696 return err;
1697 }
1698
1699 return 0;
1700}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02001701
Johan Hedberga1d01db2015-11-11 08:11:25 +02001702static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02001703{
1704 hci_dev_lock(req->hdev);
1705 __hci_update_background_scan(req);
1706 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001707 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001708}
1709
1710static void bg_scan_update(struct work_struct *work)
1711{
1712 struct hci_dev *hdev = container_of(work, struct hci_dev,
1713 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02001714 struct hci_conn *conn;
1715 u8 status;
1716 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001717
Johan Hedberg84235d22015-11-11 08:11:20 +02001718 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1719 if (!err)
1720 return;
1721
1722 hci_dev_lock(hdev);
1723
1724 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1725 if (conn)
1726 hci_le_conn_failed(conn, status);
1727
1728 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02001729}
1730
Johan Hedberga1d01db2015-11-11 08:11:25 +02001731static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001732{
1733 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001734 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001735}
1736
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001737static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1738{
1739 u8 length = opt;
1740 /* General inquiry access code (GIAC) */
1741 u8 lap[3] = { 0x33, 0x8b, 0x9e };
1742 struct hci_cp_inquiry cp;
1743
1744 BT_DBG("%s", req->hdev->name);
1745
1746 hci_dev_lock(req->hdev);
1747 hci_inquiry_cache_flush(req->hdev);
1748 hci_dev_unlock(req->hdev);
1749
1750 memset(&cp, 0, sizeof(cp));
1751 memcpy(&cp.lap, lap, sizeof(cp.lap));
1752 cp.length = length;
1753
1754 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1755
1756 return 0;
1757}
1758
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001759static void le_scan_disable_work(struct work_struct *work)
1760{
1761 struct hci_dev *hdev = container_of(work, struct hci_dev,
1762 le_scan_disable.work);
1763 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001764
1765 BT_DBG("%s", hdev->name);
1766
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001767 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001768 return;
1769
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001770 cancel_delayed_work(&hdev->le_scan_restart);
1771
1772 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1773 if (status) {
1774 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1775 return;
1776 }
1777
1778 hdev->discovery.scan_start = 0;
1779
1780 /* If we were running LE only scan, change discovery state. If
1781 * we were running both LE and BR/EDR inquiry simultaneously,
1782 * and BR/EDR inquiry is already finished, stop discovery,
1783 * otherwise BR/EDR inquiry will stop discovery when finished.
1784 * If we will resolve remote device name, do not change
1785 * discovery state.
1786 */
1787
1788 if (hdev->discovery.type == DISCOV_TYPE_LE)
1789 goto discov_stopped;
1790
1791 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
1792 return;
1793
1794 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
1795 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
1796 hdev->discovery.state != DISCOVERY_RESOLVING)
1797 goto discov_stopped;
1798
1799 return;
1800 }
1801
1802 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
1803 HCI_CMD_TIMEOUT, &status);
1804 if (status) {
1805 BT_ERR("Inquiry failed: status 0x%02x", status);
1806 goto discov_stopped;
1807 }
1808
1809 return;
1810
1811discov_stopped:
1812 hci_dev_lock(hdev);
1813 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
1814 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001815}
1816
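/* Request builder that restarts LE scanning by disabling and immediately
 * re-enabling it, so that controllers with strict duplicate filtering
 * start reporting already seen devices again.
 */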
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001817static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001818{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001819 struct hci_dev *hdev = req->hdev;
1820 struct hci_cp_le_set_scan_enable cp;
1821
1822	/* If the controller is not scanning, we are done. */
1823 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
1824 return 0;
1825
1826 hci_req_add_le_scan_disable(req);
1827
1828 memset(&cp, 0, sizeof(cp));
1829 cp.enable = LE_SCAN_ENABLE;
1830 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1831 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1832
1833 return 0;
1834}
1835
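/* Delayed work that restarts the LE scan and, for controllers with a
 * strict duplicate filter, re-queues le_scan_disable with whatever is
 * left of the original scan duration.
 */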
1836static void le_scan_restart_work(struct work_struct *work)
1837{
1838 struct hci_dev *hdev = container_of(work, struct hci_dev,
1839 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001840 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001841 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001842
1843 BT_DBG("%s", hdev->name);
1844
Johan Hedberg3dfe5902015-11-11 12:24:23 +02001845 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001846 if (status) {
1847		BT_ERR("Failed to restart LE scan: status 0x%02x", status);
1848 return;
1849 }
1850
1851 hci_dev_lock(hdev);
1852
1853 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
1854 !hdev->discovery.scan_start)
1855 goto unlock;
1856
1857	/* When the scan was started, hdev->le_scan_disable was queued to run
1858	 * `duration` after scan_start. That work was cancelled when the scan
1859	 * was restarted, so queue it again with the remaining timeout to make
1860	 * sure the scan does not run indefinitely.
1861 */
1862 duration = hdev->discovery.scan_duration;
1863 scan_start = hdev->discovery.scan_start;
1864 now = jiffies;
1865 if (now - scan_start <= duration) {
1866 int elapsed;
1867
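		/* jiffies may have wrapped around since scan_start, so
		 * account for that when computing the elapsed time.
		 */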
1868 if (now >= scan_start)
1869 elapsed = now - scan_start;
1870 else
1871 elapsed = ULONG_MAX - scan_start + now;
1872
1873 timeout = duration - elapsed;
1874 } else {
1875 timeout = 0;
1876 }
1877
1878 queue_delayed_work(hdev->req_workqueue,
1879 &hdev->le_scan_disable, timeout);
1880
1881unlock:
1882 hci_dev_unlock(hdev);
1883}
1884
Johan Hedberge68f0722015-11-11 08:30:30 +02001885static void disable_advertising(struct hci_request *req)
1886{
1887 u8 enable = 0x00;
1888
1889 hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
1890}
1891
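/* Request builder that starts an active LE scan for discovery: refuse to
 * interrupt an outgoing directed-advertising connection attempt, stop any
 * other advertising and any already running (background) scan, then scan
 * with a private address whenever one can be used.
 */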
1892static int active_scan(struct hci_request *req, unsigned long opt)
1893{
1894	u16 interval = opt;
1895 struct hci_dev *hdev = req->hdev;
1896 struct hci_cp_le_set_scan_param param_cp;
1897 struct hci_cp_le_set_scan_enable enable_cp;
1898 u8 own_addr_type;
1899 int err;
1900
1901 BT_DBG("%s", hdev->name);
1902
1903 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
1904 hci_dev_lock(hdev);
1905
1906 /* Don't let discovery abort an outgoing connection attempt
1907 * that's using directed advertising.
1908 */
1909 if (hci_lookup_le_connect(hdev)) {
1910 hci_dev_unlock(hdev);
1911 return -EBUSY;
1912 }
1913
1914 cancel_adv_timeout(hdev);
1915 hci_dev_unlock(hdev);
1916
1917 disable_advertising(req);
1918 }
1919
1920	/* If the controller is scanning, background scanning is already
1921 * running. Thus, we should temporarily stop it in order to set the
1922 * discovery scanning parameters.
1923 */
1924 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
1925 hci_req_add_le_scan_disable(req);
1926
1927 /* All active scans will be done with either a resolvable private
1928	 * address (when the privacy feature has been enabled) or a
1929	 * non-resolvable private address.
1930 */
1931 err = hci_update_random_address(req, true, &own_addr_type);
1932 if (err < 0)
1933 own_addr_type = ADDR_LE_DEV_PUBLIC;
1934
1935 memset(&param_cp, 0, sizeof(param_cp));
1936 param_cp.type = LE_SCAN_ACTIVE;
1937 param_cp.interval = cpu_to_le16(interval);
1938 param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
1939 param_cp.own_address_type = own_addr_type;
1940
1941 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
1942 &param_cp);
1943
1944 memset(&enable_cp, 0, sizeof(enable_cp));
1945 enable_cp.enable = LE_SCAN_ENABLE;
1946 enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
1947
1948 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
1949 &enable_cp);
1950
1951 return 0;
1952}
1953
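/* Request builder for simultaneous discovery: start the active LE scan
 * and the BR/EDR inquiry in one request and let the controller schedule
 * both in parallel.
 */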
1954static int interleaved_discov(struct hci_request *req, unsigned long opt)
1955{
1956 int err;
1957
1958 BT_DBG("%s", req->hdev->name);
1959
1960 err = active_scan(req, opt);
1961 if (err)
1962 return err;
1963
Johan Hedberg7df26b52015-11-11 12:24:21 +02001964 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02001965}
1966
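/* Run the HCI requests needed to start the currently configured discovery
 * type and, for LE based discovery, queue the delayed work that disables
 * the LE scan again once the discovery timeout expires.
 */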
1967static void start_discovery(struct hci_dev *hdev, u8 *status)
1968{
1969 unsigned long timeout;
1970
1971 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
1972
1973 switch (hdev->discovery.type) {
1974 case DISCOV_TYPE_BREDR:
1975 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02001976 hci_req_sync(hdev, bredr_inquiry,
1977 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02001978 status);
1979 return;
1980 case DISCOV_TYPE_INTERLEAVED:
1981 /* When running simultaneous discovery, the LE scanning time
1982		 * should occupy the whole discovery time since BR/EDR inquiry
1983 * and LE scanning are scheduled by the controller.
1984 *
1985		 * For interleaved discovery, in comparison, BR/EDR inquiry
1986 * and LE scanning are done sequentially with separate
1987 * timeouts.
1988 */
1989 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
1990 &hdev->quirks)) {
1991 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
1992 /* During simultaneous discovery, we double LE scan
1993 * interval. We must leave some time for the controller
1994 * to do BR/EDR inquiry.
1995 */
1996 hci_req_sync(hdev, interleaved_discov,
1997 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
1998 status);
1999 break;
2000 }
2001
2002 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2003 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2004 HCI_CMD_TIMEOUT, status);
2005 break;
2006 case DISCOV_TYPE_LE:
2007 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2008 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2009 HCI_CMD_TIMEOUT, status);
2010 break;
2011 default:
2012 *status = HCI_ERROR_UNSPECIFIED;
2013 return;
2014 }
2015
2016 if (*status)
2017 return;
2018
2019 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2020
2021 /* When service discovery is used and the controller has a
2022 * strict duplicate filter, it is important to remember the
2023 * start and duration of the scan. This is required for
2024 * restarting scanning during the discovery phase.
2025 */
2026 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2027 hdev->discovery.result_filtering) {
2028 hdev->discovery.scan_start = jiffies;
2029 hdev->discovery.scan_duration = timeout;
2030 }
2031
2032 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2033 timeout);
2034}
2035
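/* Append the commands needed to stop ongoing discovery, passive scanning
 * and pending remote name resolution to an existing request. Returns true
 * if there was anything that had to be stopped.
 */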
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002036bool hci_req_stop_discovery(struct hci_request *req)
2037{
2038 struct hci_dev *hdev = req->hdev;
2039 struct discovery_state *d = &hdev->discovery;
2040 struct hci_cp_remote_name_req_cancel cp;
2041 struct inquiry_entry *e;
2042 bool ret = false;
2043
2044 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2045
2046 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2047 if (test_bit(HCI_INQUIRY, &hdev->flags))
2048 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2049
2050 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2051 cancel_delayed_work(&hdev->le_scan_disable);
2052 hci_req_add_le_scan_disable(req);
2053 }
2054
2055 ret = true;
2056 } else {
2057 /* Passive scanning */
2058 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2059 hci_req_add_le_scan_disable(req);
2060 ret = true;
2061 }
2062 }
2063
2064 /* No further actions needed for LE-only discovery */
2065 if (d->type == DISCOV_TYPE_LE)
2066 return ret;
2067
2068 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2069 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2070 NAME_PENDING);
2071 if (!e)
2072 return ret;
2073
2074 bacpy(&cp.bdaddr, &e->data.bdaddr);
2075 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2076 &cp);
2077 ret = true;
2078 }
2079
2080 return ret;
2081}
2082
2083static int stop_discovery(struct hci_request *req, unsigned long opt)
2084{
2085 hci_dev_lock(req->hdev);
2086 hci_req_stop_discovery(req);
2087 hci_dev_unlock(req->hdev);
2088
2089 return 0;
2090}
2091
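/* Work callback driving the mgmt discovery state machine: run the start
 * or stop request synchronously, report the result back to mgmt and move
 * the discovery state forward accordingly.
 */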
Johan Hedberge68f0722015-11-11 08:30:30 +02002092static void discov_update(struct work_struct *work)
2093{
2094 struct hci_dev *hdev = container_of(work, struct hci_dev,
2095 discov_update);
2096 u8 status = 0;
2097
2098 switch (hdev->discovery.state) {
2099 case DISCOVERY_STARTING:
2100 start_discovery(hdev, &status);
2101 mgmt_start_discovery_complete(hdev, status);
2102 if (status)
2103 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2104 else
2105 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2106 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002107 case DISCOVERY_STOPPING:
2108 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2109 mgmt_stop_discovery_complete(hdev, status);
2110 if (!status)
2111 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2112 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002113 case DISCOVERY_STOPPED:
2114 default:
2115 return;
2116 }
2117}
2118
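/* Delayed work for the discoverable timeout: clear the discoverable
 * flags, bring the controller back in sync and announce the new settings
 * over mgmt.
 */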
Johan Hedbergc366f552015-11-23 15:43:06 +02002119static void discov_off(struct work_struct *work)
2120{
2121 struct hci_dev *hdev = container_of(work, struct hci_dev,
2122 discov_off.work);
2123
2124 BT_DBG("%s", hdev->name);
2125
2126 hci_dev_lock(hdev);
2127
2128	/* When the discoverable timeout triggers, just make sure
2129 * the limited discoverable flag is cleared. Even in the case
2130 * of a timeout triggered from general discoverable, it is
2131 * safe to unconditionally clear the flag.
2132 */
2133 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2134 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2135 hdev->discov_timeout = 0;
2136
2137 hci_dev_unlock(hdev);
2138
2139 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2140 mgmt_new_settings(hdev);
2141}
2142
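/* Request builder that brings a freshly powered-on controller in sync
 * with the stack's settings: SSP/SC support, LE host support, advertising
 * data and instances, link security, scan mode, class, name and EIR data.
 */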
Johan Hedberg2ff13892015-11-25 16:15:44 +02002143static int powered_update_hci(struct hci_request *req, unsigned long opt)
2144{
2145 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002146 u8 link_sec;
2147
2148 hci_dev_lock(hdev);
2149
2150 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2151 !lmp_host_ssp_capable(hdev)) {
2152 u8 mode = 0x01;
2153
2154 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2155
2156 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2157 u8 support = 0x01;
2158
2159 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2160 sizeof(support), &support);
2161 }
2162 }
2163
2164 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2165 lmp_bredr_capable(hdev)) {
2166 struct hci_cp_write_le_host_supported cp;
2167
2168 cp.le = 0x01;
2169 cp.simul = 0x00;
2170
2171 /* Check first if we already have the right
2172 * host state (host features set)
2173 */
2174 if (cp.le != lmp_host_le_capable(hdev) ||
2175 cp.simul != lmp_host_le_br_capable(hdev))
2176 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2177 sizeof(cp), &cp);
2178 }
2179
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002180 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02002181 /* Make sure the controller has a good default for
2182 * advertising data. This also applies to the case
2183 * where BR/EDR was toggled during the AUTO_OFF phase.
2184 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002185 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2186 list_empty(&hdev->adv_instances)) {
2187 __hci_req_update_adv_data(req, 0x00);
2188 __hci_req_update_scan_rsp_data(req, 0x00);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002189
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002190 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
2191 __hci_req_enable_advertising(req);
2192 } else if (!list_empty(&hdev->adv_instances)) {
2193 struct adv_info *adv_instance;
2194
Johan Hedberg2ff13892015-11-25 16:15:44 +02002195 adv_instance = list_first_entry(&hdev->adv_instances,
2196 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002197 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002198 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02002199 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002200 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002201 }
2202
2203 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2204 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2205 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2206 sizeof(link_sec), &link_sec);
2207
2208 if (lmp_bredr_capable(hdev)) {
2209 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2210 __hci_req_write_fast_connectable(req, true);
2211 else
2212 __hci_req_write_fast_connectable(req, false);
2213 __hci_req_update_scan(req);
2214 __hci_req_update_class(req);
2215 __hci_req_update_name(req);
2216 __hci_req_update_eir(req);
2217 }
2218
2219 hci_dev_unlock(hdev);
2220 return 0;
2221}
2222
2223int __hci_req_hci_power_on(struct hci_dev *hdev)
2224{
2225 /* Register the available SMP channels (BR/EDR and LE) only when
2226 * successfully powering on the controller. This late
2227	 * registration is required so that LE SMP can clearly decide whether
2228 * the public address or static address is used.
2229 */
2230 smp_register(hdev);
2231
2232 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2233 NULL);
2234}
2235
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002236void hci_request_setup(struct hci_dev *hdev)
2237{
Johan Hedberge68f0722015-11-11 08:30:30 +02002238 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002239 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002240 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002241 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002242 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02002243 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002244 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2245 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02002246 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002247}
2248
2249void hci_request_cancel_all(struct hci_dev *hdev)
2250{
Johan Hedberg7df0f732015-11-12 15:15:00 +02002251 hci_req_sync_cancel(hdev, ENODEV);
2252
Johan Hedberge68f0722015-11-11 08:30:30 +02002253 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002254 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002255 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002256 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002257 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02002258 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002259 cancel_delayed_work_sync(&hdev->le_scan_disable);
2260 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02002261
2262 if (hdev->adv_instance_timeout) {
2263 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2264 hdev->adv_instance_timeout = 0;
2265 }
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002266}