/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

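/* Splice the queued commands onto the device command queue and kick the
 * command worker. Only the last skb of the request carries the completion
 * callback, which is how the core recognizes the request boundary.
 */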
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

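/* Build a one-command request, run it and sleep until the matching
 * completion (or the given event) arrives, returning the resulting skb.
 *
 * Illustrative use only; the opcode and timeout below are examples and
 * not taken from this file:
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_CMD_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	...
 *	kfree_skb(skb);
 */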
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);
		return ERR_PTR(err);
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		remove_wait_queue(&hdev->req_wait_q, &wait);
		set_current_state(TASK_RUNNING);

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

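/* Allocate an skb for a single HCI command: the command header (opcode and
 * parameter length) followed by the parameter payload, tagged as
 * HCI_COMMAND_PKT.
 */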
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we start
 * the background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure that proper values are set for the RSSI threshold and
	 * UUID filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID	0x1200

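/* The helpers below build the EIR "Service Class UUIDs" fields. Each one
 * appends a 16-, 32- or 128-bit UUID list and downgrades the field type
 * from *_ALL to *_SOME when the available space runs out.
 */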
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

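/* Assemble the full extended inquiry response payload: local name, TX power,
 * Device ID and the UUID lists built by the helpers above.
 */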
static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_cp_le_set_scan_enable cp;

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_DISABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

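/* Synchronize the controller white list with the pending connection and
 * report lists. Returns the scan filter policy to use: 0x01 when the white
 * list can be used, or 0x00 when scanning must accept all advertisers
 * (list too small or RPAs in use).
 */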
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		struct hci_cp_le_del_from_white_list cp;

		if (hci_pend_le_action_lookup(&hdev->pend_le_conns,
					      &b->bdaddr, b->bdaddr_type) ||
		    hci_pend_le_action_lookup(&hdev->pend_le_reports,
					      &b->bdaddr, b->bdaddr_type)) {
			white_list_entries++;
			continue;
		}

		cp.bdaddr_type = b->bdaddr_type;
		bacpy(&cp.bdaddr, &b->bdaddr);

		hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
			    sizeof(cp), &cp);
	}

	/* Since all no-longer-valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return a filter policy value that does not
	 * use the white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

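/* Queue the commands for a passive background scan: pick an own address
 * type, program the white list and then set the scan parameters and enable
 * scanning with duplicate filtering.
 */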
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ is sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * LE privacy is enabled, controllers with Extended Scanner
	 * Filter Policies support can handle directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_PASSIVE;
	param_cp.interval = cpu_to_le16(hdev->le_scan_interval);
	param_cp.window = cpu_to_le16(hdev->le_scan_window);
	param_cp.own_address_type = own_addr_type;
	param_cp.filter_policy = filter_policy;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);
}

static u8 get_current_adv_instance(struct hci_dev *hdev)
{
	/* The "Set Advertising" setting supersedes the "Add Advertising"
	 * setting. Here we set the advertising data based on which
	 * setting was set. When neither applies, default to the global
	 * settings, represented by instance "0".
	 */
	if (!list_empty(&hdev->adv_instances) &&
	    !hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return hdev->cur_adv_instance;

	return 0x00;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = get_current_adv_instance(hdev);
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

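/* Queue the commands to (re)enable legacy advertising, using the current
 * instance's flags to pick ADV_IND, ADV_SCAN_IND or ADV_NONCONN_IND.
 */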
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u8 instance;
	u32 flags;

	if (hci_conn_num(hdev, LE_LINK) > 0)
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	instance = get_current_adv_instance(hdev);
	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable, &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0;
	size_t name_len;

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Set the appropriate entries based on advertising instance flags
	 * here once flags other than 0 are supported.
	 */
	memcpy(ptr, adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	return adv_instance->scan_rsp_len;
}

static void update_inst_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_rsp_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	if (instance)
		len = create_instance_scan_rsp_data(hdev, instance, cp.data);
	else
		len = create_default_scan_rsp_data(hdev, cp.data);

	if (hdev->scan_rsp_data_len == len &&
	    !memcmp(cp.data, hdev->scan_rsp_data, len))
		return;

	memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
	hdev->scan_rsp_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_scan_rsp_data(req, instance);
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
			flags |= LE_AD_NO_BREDR;

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	/* Provide Tx Power only if we can provide a valid value for it */
	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID &&
	    (instance_flags & MGMT_ADV_FLAG_TX_POWER)) {
		ptr[0] = 0x02;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8)hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	return ad_len;
}

static void update_inst_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_data cp;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	memset(&cp, 0, sizeof(cp));

	len = create_instance_adv_data(hdev, instance, cp.data);

	/* There's nothing to do if the data hasn't changed */
	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0)
		return;

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;

	hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
}

void __hci_req_update_adv_data(struct hci_request *req, int instance)
{
	if (instance == HCI_ADV_CURRENT)
		instance = get_current_adv_instance(req->hdev);

	update_inst_adv_data(req, instance);
}

int hci_req_update_adv_data(struct hci_dev *hdev, int instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;
	u8 instance;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	instance = get_current_adv_instance(hdev);

	hci_req_init(&req, hdev);

	if (instance) {
		__hci_req_schedule_adv_instance(&req, instance, true);
	} else {
		__hci_req_update_adv_data(&req, HCI_ADV_CURRENT);
		__hci_req_update_scan_rsp_data(&req, HCI_ADV_CURRENT);
		__hci_req_enable_advertising(&req);
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = get_current_adv_instance(hdev);
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	if (!skb_queue_empty(&req.cmd_q))
		hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

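/* Schedule an advertising instance: arm the expiry work for its duration
 * (or remaining lifetime) and, unless the same instance is already being
 * advertised, update the advertising data and enable advertising.
 */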
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	__hci_req_update_adv_data(req, HCI_ADV_CURRENT);
	__hci_req_update_scan_rsp_data(req, HCI_ADV_CURRENT);
	__hci_req_enable_advertising(req);

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct hci_request *req,
				u8 instance, bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, rem_inst);
		}
		hdev->cur_adv_instance = 0x00;
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(NULL, hdev, instance);
		}
	}

	if (list_empty(&hdev->adv_instances))
		hdev->cur_adv_instance = 0x00;

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
{
	struct hci_dev *hdev = req->hdev;

	/* If we're advertising or initiating an LE connection we can't
	 * go ahead and change the random address at this time. This is
	 * because the eventual initiator address used for the
	 * subsequently created connection will be undefined (some
	 * controllers use the new address and others the one we had
	 * when the operation started).
	 *
	 * In this kind of scenario skip the update and let the random
	 * address be updated at the next cycle.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
	    hci_lookup_le_connect(hdev)) {
		BT_DBG("Deferring random address update");
		hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
		return;
	}

	hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
}

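/* Pick the own address type for scanning/advertising: an RPA when privacy
 * is enabled, an NRPA when privacy is required but not configured, the
 * static address when forced or when no public address exists, and the
 * public address otherwise.
 */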
int hci_update_random_address(struct hci_request *req, bool require_privacy,
			      u8 *own_addr_type)
{
	struct hci_dev *hdev = req->hdev;
	int err;

	/* If privacy is enabled use a resolvable private address. If
	 * current RPA has expired or there is something else than
	 * the current RPA in use, then generate a new one.
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY)) {
		int to;

		*own_addr_type = ADDR_LE_DEV_RANDOM;

		if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
		    !bacmp(&hdev->random_addr, &hdev->rpa))
			return 0;

		err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
		if (err < 0) {
			BT_ERR("%s failed to generate new RPA", hdev->name);
			return err;
		}

		set_random_addr(req, &hdev->rpa);

		to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
		queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);

		return 0;
	}

	/* In case of required privacy without resolvable private address,
	 * use a non-resolvable private address. This is useful for active
	 * scanning and non-connectable advertising.
	 */
	if (require_privacy) {
		bdaddr_t nrpa;

		while (true) {
			/* The non-resolvable private address is generated
			 * from six random bytes with the two most significant
			 * bits cleared.
			 */
			get_random_bytes(&nrpa, 6);
			nrpa.b[5] &= 0x3f;

			/* The non-resolvable private address shall not be
			 * equal to the public address.
			 */
			if (bacmp(&hdev->bdaddr, &nrpa))
				break;
		}

		*own_addr_type = ADDR_LE_DEV_RANDOM;
		set_random_addr(req, &nrpa);
		return 0;
	}

	/* If forcing static address is in use or there is no public
	 * address use the static address as random address (but skip
	 * the HCI command if the current random address is already the
	 * static one).
	 *
	 * In case BR/EDR has been disabled on a dual-mode controller
	 * and a static address has been configured, then use that
	 * address instead of the public BR/EDR address.
	 */
	if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
	    !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
	    (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
	     bacmp(&hdev->static_addr, BDADDR_ANY))) {
		*own_addr_type = ADDR_LE_DEV_RANDOM;
		if (bacmp(&hdev->static_addr, &hdev->random_addr))
			hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
				    &hdev->static_addr);
		return 0;
	}

	/* Neither privacy nor static address is being used so use a
	 * public address.
	 */
	*own_addr_type = ADDR_LE_DEV_PUBLIC;

	return 0;
}

static bool disconnected_whitelist_entries(struct hci_dev *hdev)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->whitelist, list) {
		struct hci_conn *conn;

		conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
		if (!conn)
			return true;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			return true;
	}

	return false;
}

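/* Recompute the BR/EDR page/inquiry scan setting from the connectable and
 * discoverable flags plus any disconnected white list entries, and queue
 * Write Scan Enable only when the value actually changes.
 */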
void __hci_req_update_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 scan;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (!hdev_is_powered(hdev))
		return;

	if (mgmt_powering_down(hdev))
		return;

	if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
	    disconnected_whitelist_entries(hdev))
		scan = SCAN_PAGE;
	else
		scan = SCAN_DISABLED;

	if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		scan |= SCAN_INQUIRY;

	if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
	    test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
		return;

	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static int update_scan(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	__hci_req_update_scan(req);
	hci_dev_unlock(req->hdev);
	return 0;
}

static void scan_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);

	hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
}

static int connectable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	__hci_req_update_scan(req);

	/* If BR/EDR is not enabled and we disable advertising as a
	 * by-product of disabling connectable, we need to update the
	 * advertising flags.
	 */
	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	/* Update the advertising parameters if necessary */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    !list_empty(&hdev->adv_instances))
		__hci_req_enable_advertising(req);

	__hci_update_background_scan(req);

	hci_dev_unlock(hdev);

	return 0;
}

static void connectable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    connectable_update);
	u8 status;

	hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_connectable_complete(hdev, status);
}

static u8 get_service_classes(struct hci_dev *hdev)
{
	struct bt_uuid *uuid;
	u8 val = 0;

	list_for_each_entry(uuid, &hdev->uuids, list)
		val |= uuid->svc_hint;

	return val;
}

void __hci_req_update_class(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 cod[3];

	BT_DBG("%s", hdev->name);

	if (!hdev_is_powered(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	cod[0] = hdev->minor_class;
	cod[1] = hdev->major_class;
	cod[2] = get_service_classes(hdev);

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
		cod[1] |= 0x20;

	if (memcmp(cod, hdev->dev_class, 3) == 0)
		return;

	hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
}

static void write_iac(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_current_iac_lap cp;

	if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
		return;

	if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
		/* Limited discoverable mode */
		cp.num_iac = min_t(u8, hdev->num_iac, 2);
		cp.iac_lap[0] = 0x00;	/* LIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
		cp.iac_lap[3] = 0x33;	/* GIAC */
		cp.iac_lap[4] = 0x8b;
		cp.iac_lap[5] = 0x9e;
	} else {
		/* General discoverable mode */
		cp.num_iac = 1;
		cp.iac_lap[0] = 0x33;	/* GIAC */
		cp.iac_lap[1] = 0x8b;
		cp.iac_lap[2] = 0x9e;
	}

	hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
		    (cp.num_iac * 3) + 1, &cp);
}

static int discoverable_update(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
		write_iac(req);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
	}

	/* Advertising instances don't use the global discoverable setting, so
	 * only update AD if advertising was enabled using Set Advertising.
	 */
	if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
		__hci_req_update_adv_data(req, HCI_ADV_CURRENT);

	hci_dev_unlock(hdev);

	return 0;
}

static void discoverable_update_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discoverable_update);
	u8 status;

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
	mgmt_set_discoverable_complete(hdev, status);
}

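/* Queue the appropriate command to tear down a connection based on its
 * current state: Disconnect for established links, Create Connection Cancel
 * for outgoing attempts, and a reject command for incoming ones.
 */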
Johan Hedbergdcc0f0d2015-10-22 10:49:37 +03001648void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1649 u8 reason)
1650{
1651 switch (conn->state) {
1652 case BT_CONNECTED:
1653 case BT_CONFIG:
1654 if (conn->type == AMP_LINK) {
1655 struct hci_cp_disconn_phy_link cp;
1656
1657 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1658 cp.reason = reason;
1659 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1660 &cp);
1661 } else {
1662 struct hci_cp_disconnect dc;
1663
1664 dc.handle = cpu_to_le16(conn->handle);
1665 dc.reason = reason;
1666 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
1667 }
1668
1669 conn->state = BT_DISCONN;
1670
1671 break;
1672 case BT_CONNECT:
1673 if (conn->type == LE_LINK) {
1674 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
1675 break;
1676 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
1677 0, NULL);
1678 } else if (conn->type == ACL_LINK) {
1679 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
1680 break;
1681 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
1682 6, &conn->dst);
1683 }
1684 break;
1685 case BT_CONNECT2:
1686 if (conn->type == ACL_LINK) {
1687 struct hci_cp_reject_conn_req rej;
1688
1689 bacpy(&rej.bdaddr, &conn->dst);
1690 rej.reason = reason;
1691
1692 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
1693 sizeof(rej), &rej);
1694 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
1695 struct hci_cp_reject_sync_conn_req rej;
1696
1697 bacpy(&rej.bdaddr, &conn->dst);
1698
1699 /* SCO rejection has its own limited set of
1700 * allowed error values (0x0D-0x0F) which isn't
1701 * compatible with most values passed to this
1702 * function. To be safe hard-code one of the
1703 * values that's suitable for SCO.
1704 */
1705 rej.reason = HCI_ERROR_REMOTE_LOW_RESOURCES;
1706
1707 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
1708 sizeof(rej), &rej);
1709 }
1710 break;
1711 default:
1712 conn->state = BT_CLOSED;
1713 break;
1714 }
1715}
1716
1717static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
1718{
1719 if (status)
1720 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
1721}
1722
1723int hci_abort_conn(struct hci_conn *conn, u8 reason)
1724{
1725 struct hci_request req;
1726 int err;
1727
1728 hci_req_init(&req, conn->hdev);
1729
1730 __hci_abort_conn(&req, conn, reason);
1731
1732 err = hci_req_run(&req, abort_conn_complete);
1733 if (err && err != -ENODATA) {
1734 BT_ERR("Failed to run HCI request: err %d", err);
1735 return err;
1736 }
1737
1738 return 0;
1739}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02001740
Johan Hedberga1d01db2015-11-11 08:11:25 +02001741static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02001742{
1743 hci_dev_lock(req->hdev);
1744 __hci_update_background_scan(req);
1745 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001746 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001747}
1748
1749static void bg_scan_update(struct work_struct *work)
1750{
1751 struct hci_dev *hdev = container_of(work, struct hci_dev,
1752 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02001753 struct hci_conn *conn;
1754 u8 status;
1755 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02001756
Johan Hedberg84235d22015-11-11 08:11:20 +02001757 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
1758 if (!err)
1759 return;
1760
1761 hci_dev_lock(hdev);
1762
1763 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
1764 if (conn)
1765 hci_le_conn_failed(conn, status);
1766
1767 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02001768}
1769
Johan Hedberga1d01db2015-11-11 08:11:25 +02001770static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001771{
1772 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02001773 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001774}
1775
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001776static int bredr_inquiry(struct hci_request *req, unsigned long opt)
1777{
1778 u8 length = opt;
1779 /* General inquiry access code (GIAC) */
1780 u8 lap[3] = { 0x33, 0x8b, 0x9e };
1781 struct hci_cp_inquiry cp;
1782
1783 BT_DBG("%s", req->hdev->name);
1784
1785 hci_dev_lock(req->hdev);
1786 hci_inquiry_cache_flush(req->hdev);
1787 hci_dev_unlock(req->hdev);
1788
1789 memset(&cp, 0, sizeof(cp));
1790 memcpy(&cp.lap, lap, sizeof(cp.lap));
1791 cp.length = length;
1792
1793 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
1794
1795 return 0;
1796}
1797
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001798static void le_scan_disable_work(struct work_struct *work)
1799{
1800 struct hci_dev *hdev = container_of(work, struct hci_dev,
1801 le_scan_disable.work);
1802 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001803
1804 BT_DBG("%s", hdev->name);
1805
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001806 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02001807 return;
1808
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02001809 cancel_delayed_work(&hdev->le_scan_restart);
1810
1811 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
1812 if (status) {
1813 BT_ERR("Failed to disable LE scan: status 0x%02x", status);
1814 return;
1815 }
1816
1817 hdev->discovery.scan_start = 0;
1818
1819 /* If we were running LE only scan, change discovery state. If
1820 * we were running both LE and BR/EDR inquiry simultaneously,
1821 * and BR/EDR inquiry is already finished, stop discovery,
1822 * otherwise BR/EDR inquiry will stop discovery when finished.
1823 * If we will resolve remote device name, do not change
1824 * discovery state.
1825 */

	if (hdev->discovery.type == DISCOV_TYPE_LE)
		goto discov_stopped;

	if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
		return;

	if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
		if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
		    hdev->discovery.state != DISCOVERY_RESOLVING)
			goto discov_stopped;

		return;
	}

	hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
		     HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Inquiry failed: status 0x%02x", status);
		goto discov_stopped;
	}

	return;

discov_stopped:
	hci_dev_lock(hdev);
	hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
	hci_dev_unlock(hdev);
}

static int le_scan_restart(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_enable cp;

	/* If the controller is not scanning, we are done. */
	if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
		return 0;

	hci_req_add_le_scan_disable(req);

	memset(&cp, 0, sizeof(cp));
	cp.enable = LE_SCAN_ENABLE;
	cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);

	return 0;
}

static void le_scan_restart_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    le_scan_restart.work);
	unsigned long timeout, duration, scan_start, now;
	u8 status;

	BT_DBG("%s", hdev->name);

	hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
	if (status) {
		BT_ERR("Failed to restart LE scan: status %d", status);
		return;
	}

	hci_dev_lock(hdev);

	if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
	    !hdev->discovery.scan_start)
		goto unlock;

	/* When the scan was started, the le_scan_disable work was queued to
	 * run 'duration' after scan_start. That work was canceled during the
	 * scan restart, so queue it again with the remaining timeout to make
	 * sure the scan does not run indefinitely.
	 */
	duration = hdev->discovery.scan_duration;
	scan_start = hdev->discovery.scan_start;
	now = jiffies;
	if (now - scan_start <= duration) {
		int elapsed;

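		/* jiffies may have wrapped around since the scan was
		 * started, so compute the elapsed time accordingly.
		 */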
		if (now >= scan_start)
			elapsed = now - scan_start;
		else
			elapsed = ULONG_MAX - scan_start + now;

		timeout = duration - elapsed;
	} else {
		timeout = 0;
	}

	queue_delayed_work(hdev->req_workqueue,
			   &hdev->le_scan_disable, timeout);

unlock:
	hci_dev_unlock(hdev);
}

static void disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

static int active_scan(struct hci_request *req, unsigned long opt)
{
	uint16_t interval = opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_scan_param param_cp;
	struct hci_cp_le_set_scan_enable enable_cp;
	u8 own_addr_type;
	int err;

	BT_DBG("%s", hdev->name);

	if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
		hci_dev_lock(hdev);

		/* Don't let discovery abort an outgoing connection attempt
		 * that's using directed advertising.
		 */
		if (hci_lookup_le_connect(hdev)) {
			hci_dev_unlock(hdev);
			return -EBUSY;
		}

		cancel_adv_timeout(hdev);
		hci_dev_unlock(hdev);

		disable_advertising(req);
	}

	/* If controller is scanning, it means the background scanning is
	 * running. Thus, we should temporarily stop it in order to set the
	 * discovery scanning parameters.
	 */
	if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
		hci_req_add_le_scan_disable(req);

	/* All active scans will be done with either a resolvable private
	 * address (when privacy feature has been enabled) or non-resolvable
	 * private address.
	 */
	err = hci_update_random_address(req, true, &own_addr_type);
	if (err < 0)
		own_addr_type = ADDR_LE_DEV_PUBLIC;

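	/* Set up an active scan using the window defined for discovery;
	 * the scan interval is supplied by the caller through 'opt'.
	 */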
	memset(&param_cp, 0, sizeof(param_cp));
	param_cp.type = LE_SCAN_ACTIVE;
	param_cp.interval = cpu_to_le16(interval);
	param_cp.window = cpu_to_le16(DISCOV_LE_SCAN_WIN);
	param_cp.own_address_type = own_addr_type;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
		    &param_cp);

	memset(&enable_cp, 0, sizeof(enable_cp));
	enable_cp.enable = LE_SCAN_ENABLE;
	enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

	hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
		    &enable_cp);

	return 0;
}

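/* Queue both the active LE scan and the BR/EDR inquiry in a single request
 * so that the controller can schedule them simultaneously.
 */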
static int interleaved_discov(struct hci_request *req, unsigned long opt)
{
	int err;

	BT_DBG("%s", req->hdev->name);

	err = active_scan(req, opt);
	if (err)
		return err;

	return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
}

static void start_discovery(struct hci_dev *hdev, u8 *status)
{
	unsigned long timeout;

	BT_DBG("%s type %u", hdev->name, hdev->discovery.type);

	switch (hdev->discovery.type) {
	case DISCOV_TYPE_BREDR:
		if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
			hci_req_sync(hdev, bredr_inquiry,
				     DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
				     status);
		return;
	case DISCOV_TYPE_INTERLEAVED:
		/* When running simultaneous discovery, the LE scanning time
		 * should occupy the whole discovery time since BR/EDR inquiry
		 * and LE scanning are scheduled by the controller.
		 *
		 * Interleaved discovery, in comparison, does BR/EDR inquiry
		 * and LE scanning sequentially with separate timeouts.
		 */
		if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
			     &hdev->quirks)) {
			timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
			/* During simultaneous discovery, we double the LE
			 * scan interval to leave the controller some time to
			 * do BR/EDR inquiry as well.
			 */
			hci_req_sync(hdev, interleaved_discov,
				     DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
				     status);
			break;
		}

		timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	case DISCOV_TYPE_LE:
		timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
		hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
			     HCI_CMD_TIMEOUT, status);
		break;
	default:
		*status = HCI_ERROR_UNSPECIFIED;
		return;
	}

	if (*status)
		return;

	BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));

	/* When service discovery is used and the controller has a
	 * strict duplicate filter, it is important to remember the
	 * start and duration of the scan. This is required for
	 * restarting scanning during the discovery phase.
	 */
	if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
	    hdev->discovery.result_filtering) {
		hdev->discovery.scan_start = jiffies;
		hdev->discovery.scan_duration = timeout;
	}

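	/* Arm the LE scan disable work so that discovery is stopped once
	 * the timeout chosen above expires.
	 */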
	queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
			   timeout);
}

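/* Stop any ongoing discovery activity (inquiry, LE scanning and pending
 * remote name resolution). Returns true if at least one HCI command was
 * queued on the request.
 */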
bool hci_req_stop_discovery(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct discovery_state *d = &hdev->discovery;
	struct hci_cp_remote_name_req_cancel cp;
	struct inquiry_entry *e;
	bool ret = false;

	BT_DBG("%s state %u", hdev->name, hdev->discovery.state);

	if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
		if (test_bit(HCI_INQUIRY, &hdev->flags))
			hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);

		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			cancel_delayed_work(&hdev->le_scan_disable);
			hci_req_add_le_scan_disable(req);
		}

		ret = true;
	} else {
		/* Passive scanning */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
			hci_req_add_le_scan_disable(req);
			ret = true;
		}
	}

	/* No further actions needed for LE-only discovery */
	if (d->type == DISCOV_TYPE_LE)
		return ret;

	if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
		e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
						     NAME_PENDING);
		if (!e)
			return ret;

		bacpy(&cp.bdaddr, &e->data.bdaddr);
		hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
			    &cp);
		ret = true;
	}

	return ret;
}

static int stop_discovery(struct hci_request *req, unsigned long opt)
{
	hci_dev_lock(req->hdev);
	hci_req_stop_discovery(req);
	hci_dev_unlock(req->hdev);

	return 0;
}

static void discov_update(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_update);
	u8 status = 0;

	switch (hdev->discovery.state) {
	case DISCOVERY_STARTING:
		start_discovery(hdev, &status);
		mgmt_start_discovery_complete(hdev, status);
		if (status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		else
			hci_discovery_set_state(hdev, DISCOVERY_FINDING);
		break;
	case DISCOVERY_STOPPING:
		hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
		mgmt_stop_discovery_complete(hdev, status);
		if (!status)
			hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
		break;
	case DISCOVERY_STOPPED:
	default:
		return;
	}
}

static void discov_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	/* When the discoverable timeout triggers, just make sure that the
	 * limited discoverable flag is cleared. Even in the case of a
	 * timeout triggered from general discoverable, it is safe to
	 * unconditionally clear the flag.
	 */
	hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
	hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);

	hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
	mgmt_new_settings(hdev);
}

static int powered_update_hci(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance;
	u8 link_sec;

	hci_dev_lock(hdev);

	if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
	    !lmp_host_ssp_capable(hdev)) {
		u8 mode = 0x01;

		hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);

		if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
			u8 support = 0x01;

			hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
				    sizeof(support), &support);
		}
	}

	if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
	    lmp_bredr_capable(hdev)) {
		struct hci_cp_write_le_host_supported cp;

		cp.le = 0x01;
		cp.simul = 0x00;

		/* Check first if we already have the right
		 * host state (host features set)
		 */
		if (cp.le != lmp_host_le_capable(hdev) ||
		    cp.simul != lmp_host_le_br_capable(hdev))
			hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
				    sizeof(cp), &cp);
	}

	if (lmp_le_capable(hdev)) {
		/* Make sure the controller has a good default for
		 * advertising data. This also applies to the case
		 * where BR/EDR was toggled during the AUTO_OFF phase.
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
		    (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
		     list_empty(&hdev->adv_instances))) {
			__hci_req_update_adv_data(req, HCI_ADV_CURRENT);
			__hci_req_update_scan_rsp_data(req, HCI_ADV_CURRENT);
		}

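		/* If no advertising instance is currently selected, default
		 * to the first registered instance.
		 */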
		if (hdev->cur_adv_instance == 0x00 &&
		    !list_empty(&hdev->adv_instances)) {
			adv_instance = list_first_entry(&hdev->adv_instances,
							struct adv_info, list);
			hdev->cur_adv_instance = adv_instance->instance;
		}

		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			__hci_req_enable_advertising(req);
		else if (!list_empty(&hdev->adv_instances) &&
			 hdev->cur_adv_instance)
			__hci_req_schedule_adv_instance(req,
							hdev->cur_adv_instance,
							true);
	}

	link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
	if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
			    sizeof(link_sec), &link_sec);

	if (lmp_bredr_capable(hdev)) {
		if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
			__hci_req_write_fast_connectable(req, true);
		else
			__hci_req_write_fast_connectable(req, false);
		__hci_req_update_scan(req);
		__hci_req_update_class(req);
		__hci_req_update_name(req);
		__hci_req_update_eir(req);
	}

	hci_dev_unlock(hdev);
	return 0;
}

int __hci_req_hci_power_on(struct hci_dev *hdev)
{
	/* Register the available SMP channels (BR/EDR and LE) only when
	 * successfully powering on the controller. This late
	 * registration is required so that LE SMP can clearly decide if
	 * the public address or static address is used.
	 */
	smp_register(hdev);

	return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
			      NULL);
}

void hci_request_setup(struct hci_dev *hdev)
{
	INIT_WORK(&hdev->discov_update, discov_update);
	INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
	INIT_WORK(&hdev->scan_update, scan_update_work);
	INIT_WORK(&hdev->connectable_update, connectable_update_work);
	INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
	INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
	INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
	INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
}

void hci_request_cancel_all(struct hci_dev *hdev)
{
	hci_req_sync_cancel(hdev, ENODEV);

	cancel_work_sync(&hdev->discov_update);
	cancel_work_sync(&hdev->bg_scan_update);
	cancel_work_sync(&hdev->scan_update);
	cancel_work_sync(&hdev->connectable_update);
	cancel_work_sync(&hdev->discoverable_update);
	cancel_delayed_work_sync(&hdev->discov_off);
	cancel_delayed_work_sync(&hdev->le_scan_disable);
	cancel_delayed_work_sync(&hdev->le_scan_restart);

	if (hdev->adv_instance_timeout) {
		cancel_delayed_work_sync(&hdev->adv_instance_expire);
		hdev->adv_instance_timeout = 0;
	}
}