/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE	  0
#define HCI_REQ_PEND	  1
#define HCI_REQ_CANCELED  2

void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
	skb_queue_head_init(&req->cmd_q);
	req->hdev = hdev;
	req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
	skb_queue_purge(&req->cmd_q);
}

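/* Splice all queued commands of the request onto hdev->cmd_q and schedule
 * the command work item. The completion callback (plain or skb-based) is
 * attached to the last command of the request.
 */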
static int req_run(struct hci_request *req, hci_req_complete_t complete,
		   hci_req_complete_skb_t complete_skb)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	skb = skb_peek_tail(&req->cmd_q);
	if (complete) {
		bt_cb(skb)->hci.req_complete = complete;
	} else if (complete_skb) {
		bt_cb(skb)->hci.req_complete_skb = complete_skb;
		bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
	}

	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
	return req_run(req, NULL, complete);
}

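/* Completion handler used by the synchronous request helpers below; it
 * records the result (and event skb, if any) and wakes up the waiter on
 * hdev->req_wait_q.
 */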
static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
				  struct sk_buff *skb)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		if (skb)
			hdev->req_skb = skb_get(skb);
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

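/* Send a single HCI command and wait (up to @timeout) for @event, or for
 * the regular command completion when @event is 0. The returned skb must
 * be freed by the caller with kfree_skb().
 */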
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
				  const void *param, u8 event, u32 timeout)
{
	struct hci_request req;
	struct sk_buff *skb;
	int err = 0;

	BT_DBG("%s", hdev->name);

	hci_req_init(&req, hdev);

	hci_req_add_ev(&req, opcode, plen, param, event);

	hdev->req_status = HCI_REQ_PEND;

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0)
		return ERR_PTR(err);

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return ERR_PTR(-EINTR);

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;
	skb = hdev->req_skb;
	hdev->req_skb = NULL;

	BT_DBG("%s end: err %d", hdev->name, err);

	if (err < 0) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	if (!skb)
		return ERR_PTR(-ENODATA);

	return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
			       const void *param, u32 timeout)
{
	return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);
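
/* Illustrative usage sketch (not part of the original file): a caller
 * typically issues a synchronous command and checks the returned skb, e.g.
 *
 *	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *			     HCI_INIT_TIMEOUT);
 *	if (IS_ERR(skb))
 *		return PTR_ERR(skb);
 *	kfree_skb(skb);
 */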

/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
						     unsigned long opt),
		   unsigned long opt, u32 timeout, u8 *hci_status)
{
	struct hci_request req;
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	err = func(&req, opt);
	if (err) {
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		return err;
	}

	err = hci_req_run_skb(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA) {
			if (hci_status)
				*hci_status = 0;
			return 0;
		}

		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;

		return err;
	}

	err = wait_event_interruptible_timeout(hdev->req_wait_q,
					       hdev->req_status != HCI_REQ_PEND,
					       timeout);

	if (err == -ERESTARTSYS)
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		if (hci_status)
			*hci_status = hdev->req_result;
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;

	default:
		err = -ETIMEDOUT;
		if (hci_status)
			*hci_status = HCI_ERROR_UNSPECIFIED;
		break;
	}

	kfree_skb(hdev->req_skb);
	hdev->req_skb = NULL;
	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
						  unsigned long opt),
		 unsigned long opt, u32 timeout, u8 *hci_status)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_sync_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
	hci_req_sync_unlock(hdev);

	return ret;
}

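/* Allocate an skb containing the HCI command header and parameters for
 * @opcode; returns NULL on allocation failure.
 */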
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
				const void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb)
		return NULL;

	hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen = plen;

	if (plen)
		skb_put_data(skb, param, plen);

	BT_DBG("skb len %d", skb->len);

	hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
	hci_skb_opcode(skb) = opcode;

	return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
		    const void *param, u8 event)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
			   opcode);
		req->err = -ENOMEM;
		return;
	}

	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

	bt_cb(skb)->hci.req_event = event;

	skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
		 const void *param)
{
	hci_req_add_ev(req, opcode, plen, param, 0);
}

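/* Queue Write Page Scan Activity/Type commands to switch between the
 * interlaced "fast connectable" page scan parameters and the defaults.
 */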
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_page_scan_activity acp;
	u8 type;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		return;

	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (enable) {
		type = PAGE_SCAN_TYPE_INTERLACED;

		/* 160 msec page scan interval */
		acp.interval = cpu_to_le16(0x0100);
	} else {
		type = PAGE_SCAN_TYPE_STANDARD;	/* default */

		/* default 1.28 sec page scan */
		acp.interval = cpu_to_le16(0x0800);
	}

	acp.window = cpu_to_le16(0x0012);

	if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
	    __cpu_to_le16(hdev->page_scan_window) != acp.window)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
			    sizeof(acp), &acp);

	if (hdev->page_scan_type != type)
		hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls the background scanning based on hdev->pend_le_conns
 * list. If there are pending LE connections we start the background scanning,
 * otherwise we stop it.
 *
 * This function requires the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (!test_bit(HCI_UP, &hdev->flags) ||
	    test_bit(HCI_INIT, &hdev->flags) ||
	    hci_dev_test_flag(hdev, HCI_SETUP) ||
	    hci_dev_test_flag(hdev, HCI_CONFIG) ||
	    hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
	    hci_dev_test_flag(hdev, HCI_UNREGISTER))
		return;

	/* No point in doing scanning if LE support hasn't been enabled */
	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	/* If discovery is active don't interfere with it */
	if (hdev->discovery.state != DISCOVERY_STOPPED)
		return;

	/* Reset RSSI and UUID filters when starting background scanning
	 * since these filters are meant for service discovery only.
	 *
	 * The Start Discovery and Start Service Discovery operations
	 * ensure to set proper values for RSSI threshold and UUID
	 * filter list. So it is safe to just reset them here.
	 */
	hci_discovery_filter_clear(hdev);

	if (list_empty(&hdev->pend_le_conns) &&
	    list_empty(&hdev->pend_le_reports)) {
		/* If there are no pending LE connections or devices
		 * to be scanned for, we should stop the background
		 * scanning.
		 */

		/* If controller is not scanning we are done. */
		if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
			return;

		hci_req_add_le_scan_disable(req);

		BT_DBG("%s stopping background scanning", hdev->name);
	} else {
		/* If there is at least one pending LE connection, we should
		 * keep the background scan running.
		 */

		/* If controller is connecting, we should not start scanning
		 * since some controllers are not able to scan and connect at
		 * the same time.
		 */
		if (hci_lookup_le_connect(hdev))
			return;

		/* If controller is currently scanning, we stop it to ensure we
		 * don't miss any advertising (due to duplicates filter).
		 */
		if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
			hci_req_add_le_scan_disable(req);

		hci_req_add_le_passive_scan(req);

		BT_DBG("%s starting background scanning", hdev->name);
	}
}

void __hci_req_update_name(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_local_name cp;

	memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

	hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID		0x1200

static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 4)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		u16 uuid16;

		if (uuid->size != 16)
			continue;

		uuid16 = get_unaligned_le16(&uuid->uuid[12]);
		if (uuid16 < 0x1100)
			continue;

		if (uuid16 == PNP_INFO_SVCLASS_ID)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID16_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u16) > len) {
			uuids_start[1] = EIR_UUID16_SOME;
			break;
		}

		*ptr++ = (uuid16 & 0x00ff);
		*ptr++ = (uuid16 & 0xff00) >> 8;
		uuids_start[0] += sizeof(uuid16);
	}

	return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 6)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 32)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID32_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + sizeof(u32) > len) {
			uuids_start[1] = EIR_UUID32_SOME;
			break;
		}

		memcpy(ptr, &uuid->uuid[12], sizeof(u32));
		ptr += sizeof(u32);
		uuids_start[0] += sizeof(u32);
	}

	return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
	u8 *ptr = data, *uuids_start = NULL;
	struct bt_uuid *uuid;

	if (len < 18)
		return ptr;

	list_for_each_entry(uuid, &hdev->uuids, list) {
		if (uuid->size != 128)
			continue;

		if (!uuids_start) {
			uuids_start = ptr;
			uuids_start[0] = 1;
			uuids_start[1] = EIR_UUID128_ALL;
			ptr += 2;
		}

		/* Stop if not enough space to put next UUID */
		if ((ptr - data) + 16 > len) {
			uuids_start[1] = EIR_UUID128_SOME;
			break;
		}

		memcpy(ptr, uuid->uuid, 16);
		ptr += 16;
		uuids_start[0] += 16;
	}

	return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
	u8 *ptr = data;
	size_t name_len;

	name_len = strlen(hdev->dev_name);

	if (name_len > 0) {
		/* EIR Data type */
		if (name_len > 48) {
			name_len = 48;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		/* EIR Data length */
		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ptr += (name_len + 2);
	}

	if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->inq_tx_power;

		ptr += 3;
	}

	if (hdev->devid_source > 0) {
		ptr[0] = 9;
		ptr[1] = EIR_DEVICE_ID;

		put_unaligned_le16(hdev->devid_source, ptr + 2);
		put_unaligned_le16(hdev->devid_vendor, ptr + 4);
		put_unaligned_le16(hdev->devid_product, ptr + 6);
		put_unaligned_le16(hdev->devid_version, ptr + 8);

		ptr += 10;
	}

	ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
	ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

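/* Queue a Write Extended Inquiry Response command if the EIR data built
 * from the current device state differs from what is already set.
 */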
void __hci_req_update_eir(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_eir cp;

	if (!hdev_is_powered(hdev))
		return;

	if (!lmp_ext_inq_capable(hdev))
		return;

	if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
		return;

	if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
		return;

	memset(&cp, 0, sizeof(cp));

	create_eir(hdev, cp.data);

	if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
		return;

	memcpy(hdev->eir, cp.data, sizeof(cp.data));

	hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

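/* Disable LE scanning, using the extended scan enable command when the
 * controller supports extended scanning and the legacy command otherwise.
 */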
void hci_req_add_le_scan_disable(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_enable cp;

		memset(&cp, 0, sizeof(cp));
		cp.enable = LE_SCAN_DISABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
	}
}

static void add_to_white_list(struct hci_request *req,
			      struct hci_conn_params *params)
{
	struct hci_cp_le_add_to_white_list cp;

	cp.bdaddr_type = params->addr_type;
	bacpy(&cp.bdaddr, &params->addr);

	hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

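/* Synchronize the controller white list with the pending connection and
 * report lists. Returns the scan filter policy to use: 0x01 when the white
 * list can be used, 0x00 when scanning must accept all advertisements.
 */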
static u8 update_white_list(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_conn_params *params;
	struct bdaddr_list *b;
	uint8_t white_list_entries = 0;

	/* Go through the current white list programmed into the
	 * controller one by one and check if that address is still
	 * in the list of pending connections or list of devices to
	 * report. If not present in either list, then queue the
	 * command to remove it from the controller.
	 */
	list_for_each_entry(b, &hdev->le_white_list, list) {
		/* If the device is neither in pend_le_conns nor
		 * pend_le_reports then remove it from the whitelist.
		 */
		if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
					       &b->bdaddr, b->bdaddr_type) &&
		    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
					       &b->bdaddr, b->bdaddr_type)) {
			struct hci_cp_le_del_from_white_list cp;

			cp.bdaddr_type = b->bdaddr_type;
			bacpy(&cp.bdaddr, &b->bdaddr);

			hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
				    sizeof(cp), &cp);
			continue;
		}

		if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
	}

	/* Since all no longer valid white list entries have been
	 * removed, walk through the list of pending connections
	 * and ensure that any new device gets programmed into
	 * the controller.
	 *
	 * If the list of the devices is larger than the list of
	 * available white list entries in the controller, then
	 * just abort and return filter policy value to not use the
	 * white list.
	 */
	list_for_each_entry(params, &hdev->pend_le_conns, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* After adding all new pending connections, walk through
	 * the list of pending reports and also add these to the
	 * white list if there is still space.
	 */
	list_for_each_entry(params, &hdev->pend_le_reports, action) {
		if (hci_bdaddr_list_lookup(&hdev->le_white_list,
					   &params->addr, params->addr_type))
			continue;

		if (white_list_entries >= hdev->le_white_list_size) {
			/* Select filter policy to accept all advertising */
			return 0x00;
		}

		if (hci_find_irk_by_addr(hdev, &params->addr,
					 params->addr_type)) {
			/* White list can not be used with RPAs */
			return 0x00;
		}

		white_list_entries++;
		add_to_white_list(req, params);
	}

	/* Select filter policy to use white list */
	return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
	return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

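/* Queue the scan parameter and scan enable commands, using the extended
 * scanning commands (with one PHY parameter block per supported PHY) when
 * the controller supports them and the legacy commands otherwise.
 */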
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
			       u16 window, u8 own_addr_type, u8 filter_policy)
{
	struct hci_dev *hdev = req->hdev;

	/* Use extended scanning if the Set Extended Scan Parameters and
	 * Set Extended Scan Enable commands are supported.
	 */
	if (use_ext_scan(hdev)) {
		struct hci_cp_le_set_ext_scan_params *ext_param_cp;
		struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
		struct hci_cp_le_scan_phy_params *phy_params;
		u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
		u32 plen;

		ext_param_cp = (void *)data;
		phy_params = (void *)ext_param_cp->data;

		memset(ext_param_cp, 0, sizeof(*ext_param_cp));
		ext_param_cp->own_addr_type = own_addr_type;
		ext_param_cp->filter_policy = filter_policy;

		plen = sizeof(*ext_param_cp);

		if (scan_1m(hdev) || scan_2m(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		if (scan_coded(hdev)) {
			ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

			memset(phy_params, 0, sizeof(*phy_params));
			phy_params->type = type;
			phy_params->interval = cpu_to_le16(interval);
			phy_params->window = cpu_to_le16(window);

			plen += sizeof(*phy_params);
			phy_params++;
		}

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
			    plen, ext_param_cp);

		memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
		ext_enable_cp.enable = LE_SCAN_ENABLE;
		ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
			    sizeof(ext_enable_cp), &ext_enable_cp);
	} else {
		struct hci_cp_le_set_scan_param param_cp;
		struct hci_cp_le_set_scan_enable enable_cp;

		memset(&param_cp, 0, sizeof(param_cp));
		param_cp.type = type;
		param_cp.interval = cpu_to_le16(interval);
		param_cp.window = cpu_to_le16(window);
		param_cp.own_address_type = own_addr_type;
		param_cp.filter_policy = filter_policy;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
			    &param_cp);

		memset(&enable_cp, 0, sizeof(enable_cp));
		enable_cp.enable = LE_SCAN_ENABLE;
		enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
		hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
			    &enable_cp);
	}
}

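/* Queue the commands needed to (re)start LE passive scanning with the
 * current white list and address configuration.
 */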
void hci_req_add_le_passive_scan(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	u8 own_addr_type;
	u8 filter_policy;

	/* Set require_privacy to false since no SCAN_REQ are sent
	 * during passive scanning. Not using a non-resolvable address
	 * here is important so that peer devices using direct
	 * advertising with our address will be correctly reported
	 * by the controller.
	 */
	if (hci_update_random_address(req, false, scan_use_rpa(hdev),
				      &own_addr_type))
		return;

	/* Adding or removing entries from the white list must
	 * happen before enabling scanning. The controller does
	 * not allow white list modification while scanning.
	 */
	filter_policy = update_white_list(req);

	/* When the controller is using random resolvable addresses and
	 * with that having LE privacy enabled, then controllers with
	 * Extended Scanner Filter Policies support can now enable support
	 * for handling directed advertising.
	 *
	 * So instead of using filter policies 0x00 (no whitelist)
	 * and 0x01 (whitelist enabled) use the new filter policies
	 * 0x02 (no whitelist) and 0x03 (whitelist enabled).
	 */
	if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
	    (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
		filter_policy |= 0x02;

	hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
			   hdev->le_scan_window, own_addr_type, filter_policy);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
	u8 instance = hdev->cur_adv_instance;
	struct adv_info *adv_instance;

	/* Ignore instance 0 */
	if (instance == 0x00)
		return 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	/* TODO: Take into account the "appearance" and "local-name" flags here.
	 * These are currently being ignored as they are not supported.
	 */
	return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
	u8 enable = 0x00;

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

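/* Return the advertising flags for @instance; instance 0 derives its flags
 * from the global connectable and discoverable settings.
 */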
static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
	u32 flags;
	struct adv_info *adv_instance;

	if (instance == 0x00) {
		/* Instance 0 always manages the "Tx Power" and "Flags"
		 * fields
		 */
		flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

		/* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
		 * corresponds to the "connectable" instance flag.
		 */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
			flags |= MGMT_ADV_FLAG_CONNECTABLE;

		if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
		else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
			flags |= MGMT_ADV_FLAG_DISCOV;

		return flags;
	}

	adv_instance = hci_find_adv_instance(hdev, instance);

	/* Return 0 when we got an invalid instance identifier. */
	if (!adv_instance)
		return 0;

	return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
	/* If privacy is not enabled don't use RPA */
	if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
		return false;

	/* If basic privacy mode is enabled use RPA */
	if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
		return true;

	/* If limited privacy mode is enabled don't use RPA if we're
	 * both discoverable and bondable.
	 */
	if ((flags & MGMT_ADV_FLAG_DISCOV) &&
	    hci_dev_test_flag(hdev, HCI_BONDABLE))
		return false;

	/* We're neither bondable nor discoverable in the limited
	 * privacy mode, therefore use RPA.
	 */
	return true;
}
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
	/* If there is no connection we are OK to advertise. */
	if (hci_conn_num(hdev, LE_LINK) == 0)
		return true;

	/* Check le_states if there is any connection in slave role. */
	if (hdev->conn_hash.le_num_slave > 0) {
		/* Slave connection state and non connectable mode bit 20. */
		if (!connectable && !(hdev->le_states[2] & 0x10))
			return false;

		/* Slave connection state and connectable mode bit 38
		 * and scannable bit 21.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x40) ||
				    !(hdev->le_states[2] & 0x20)))
			return false;
	}

	/* Check le_states if there is any connection in master role. */
	if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
		/* Master connection state and non connectable mode bit 18. */
		if (!connectable && !(hdev->le_states[2] & 0x02))
			return false;

		/* Master connection state and connectable mode bit 35 and
		 * scannable 19.
		 */
		if (connectable && (!(hdev->le_states[4] & 0x08) ||
				    !(hdev->le_states[2] & 0x08)))
			return false;
	}

	return true;
}

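/* Queue the legacy Set Advertising Parameters and Set Advertising Enable
 * commands based on the current instance flags and connectable setting.
 */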
void __hci_req_enable_advertising(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_le_set_adv_param cp;
	u8 own_addr_type, enable = 0x01;
	bool connectable;
	u32 flags;

	flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return;

	if (hci_dev_test_flag(hdev, HCI_LE_ADV))
		__hci_req_disable_advertising(req);

	/* Clear the HCI_LE_ADV bit temporarily so that the
	 * hci_update_random_address knows that it's safe to go ahead
	 * and write a new random address. The flag will be set back on
	 * as soon as the SET_ADV_ENABLE HCI command completes.
	 */
	hci_dev_clear_flag(hdev, HCI_LE_ADV);

	/* Set require_privacy to true only when non-connectable
	 * advertising is used. In that case it is fine to use a
	 * non-resolvable private address.
	 */
	if (hci_update_random_address(req, !connectable,
				      adv_use_rpa(hdev, flags),
				      &own_addr_type) < 0)
		return;

	memset(&cp, 0, sizeof(cp));
	cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
	cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

	if (connectable)
		cp.type = LE_ADV_IND;
	else if (get_cur_adv_instance_scan_rsp_len(hdev))
		cp.type = LE_ADV_SCAN_IND;
	else
		cp.type = LE_ADV_NONCONN_IND;

	cp.own_address_type = own_addr_type;
	cp.channel_map = hdev->le_adv_channel_map;

	hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

	hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

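/* Append the device name to an advertising data buffer, preferring the
 * complete name, then the short name, then a truncated complete name.
 * Returns the new length of the buffer.
 */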
u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	size_t short_len;
	size_t complete_len;

	/* no space left for name (+ NULL + type + len) */
	if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
		return ad_len;

	/* use complete name if present and fits */
	complete_len = strlen(hdev->dev_name);
	if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
		return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
				       hdev->dev_name, complete_len + 1);

	/* use short name if present */
	short_len = strlen(hdev->short_name);
	if (short_len)
		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
				       hdev->short_name, short_len + 1);

	/* use shortened full name if present, we already know that name
	 * is longer than HCI_MAX_SHORT_NAME_LENGTH
	 */
	if (complete_len) {
		u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

		memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
		name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

		return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
				       sizeof(name));
	}

	return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
	return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
	u8 scan_rsp_len = 0;

	if (hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
					u8 *ptr)
{
	struct adv_info *adv_instance;
	u32 instance_flags;
	u8 scan_rsp_len = 0;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return 0;

	instance_flags = adv_instance->flags;

	if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance) {
		scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);
	}

	memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
	       adv_instance->scan_rsp_len);

	scan_rsp_len += adv_instance->scan_rsp_len;

	if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
		scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

	return scan_rsp_len;
}

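/* Queue a scan response data update (extended or legacy variant) if the
 * generated data differs from what the controller currently has.
 */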
void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.handle = 0;
		cp.length = len;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
			    &cp);
	} else {
		struct hci_cp_le_set_scan_rsp_data cp;

		memset(&cp, 0, sizeof(cp));

		if (instance)
			len = create_instance_scan_rsp_data(hdev, instance,
							    cp.data);
		else
			len = create_default_scan_rsp_data(hdev, cp.data);

		if (hdev->scan_rsp_data_len == len &&
		    !memcmp(cp.data, hdev->scan_rsp_data, len))
			return;

		memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
		hdev->scan_rsp_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
	}
}

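/* Build the advertising data for @instance into @ptr: the flags field (when
 * managed), the instance's own advertising data and, if requested, the Tx
 * power. Returns the resulting data length.
 */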
static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
{
	struct adv_info *adv_instance = NULL;
	u8 ad_len = 0, flags = 0;
	u32 instance_flags;

	/* Return 0 when the current instance identifier is invalid. */
	if (instance) {
		adv_instance = hci_find_adv_instance(hdev, instance);
		if (!adv_instance)
			return 0;
	}

	instance_flags = get_adv_instance_flags(hdev, instance);

	/* The Add Advertising command allows userspace to set both the general
	 * and limited discoverable flags.
	 */
	if (instance_flags & MGMT_ADV_FLAG_DISCOV)
		flags |= LE_AD_GENERAL;

	if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV)
		flags |= LE_AD_LIMITED;

	if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
		flags |= LE_AD_NO_BREDR;

	if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) {
		/* If a discovery flag wasn't provided, simply use the global
		 * settings.
		 */
		if (!flags)
			flags |= mgmt_get_adv_discov_flags(hdev);

		/* If flags would still be empty, then there is no need to
		 * include the "Flags" AD field.
		 */
		if (flags) {
			ptr[0] = 0x02;
			ptr[1] = EIR_FLAGS;
			ptr[2] = flags;

			ad_len += 3;
			ptr += 3;
		}
	}

	if (adv_instance) {
		memcpy(ptr, adv_instance->adv_data,
		       adv_instance->adv_data_len);
		ad_len += adv_instance->adv_data_len;
		ptr += adv_instance->adv_data_len;
	}

	if (instance_flags & MGMT_ADV_FLAG_TX_POWER) {
		s8 adv_tx_power;

		if (ext_adv_capable(hdev)) {
			if (adv_instance)
				adv_tx_power = adv_instance->tx_power;
			else
				adv_tx_power = hdev->adv_tx_power;
		} else {
			adv_tx_power = hdev->adv_tx_power;
		}

		/* Provide Tx Power only if we can provide a valid value for it */
		if (adv_tx_power != HCI_TX_POWER_INVALID) {
			ptr[0] = 0x02;
			ptr[1] = EIR_TX_POWER;
			ptr[2] = (u8)adv_tx_power;

			ad_len += 3;
			ptr += 3;
		}
	}

	return ad_len;
}

void __hci_req_update_adv_data(struct hci_request *req, u8 instance)
{
	struct hci_dev *hdev = req->hdev;
	u8 len;

	if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
		return;

	if (ext_adv_capable(hdev)) {
		struct hci_cp_le_set_ext_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;
		cp.handle = 0;
		cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
		cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

		hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp);
	} else {
		struct hci_cp_le_set_adv_data cp;

		memset(&cp, 0, sizeof(cp));

		len = create_instance_adv_data(hdev, instance, cp.data);

		/* There's nothing to do if the data hasn't changed */
		if (hdev->adv_data_len == len &&
		    memcmp(cp.data, hdev->adv_data, len) == 0)
			return;

		memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
		hdev->adv_data_len = len;

		cp.length = len;

		hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
	}
}

int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance)
{
	struct hci_request req;

	hci_req_init(&req, hdev);
	__hci_req_update_adv_data(&req, instance);

	return hci_req_run(&req, NULL);
}

static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode)
{
	BT_DBG("%s status %u", hdev->name, status);
}

void hci_req_reenable_advertising(struct hci_dev *hdev)
{
	struct hci_request req;

	if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) &&
	    list_empty(&hdev->adv_instances))
		return;

	hci_req_init(&req, hdev);

	if (hdev->cur_adv_instance) {
		__hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance,
						true);
	} else {
		if (ext_adv_capable(hdev)) {
			__hci_req_start_ext_adv(&req, 0x00);
		} else {
			__hci_req_update_adv_data(&req, 0x00);
			__hci_req_update_scan_rsp_data(&req, 0x00);
			__hci_req_enable_advertising(&req);
		}
	}

	hci_req_run(&req, adv_enable_complete);
}

static void adv_timeout_expire(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    adv_instance_expire.work);

	struct hci_request req;
	u8 instance;

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hdev->adv_instance_timeout = 0;

	instance = hdev->cur_adv_instance;
	if (instance == 0x00)
		goto unlock;

	hci_req_init(&req, hdev);

	hci_req_clear_adv_instance(hdev, NULL, &req, instance, false);

	if (list_empty(&hdev->adv_instances))
		__hci_req_disable_advertising(&req);

	hci_req_run(&req, NULL);

unlock:
	hci_dev_unlock(hdev);
}

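/* Queue the Set Extended Advertising Parameters command for @instance,
 * using a legacy PDU type derived from the instance flags. Returns 0 on
 * success or -EPERM if advertising is not currently allowed.
 */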
int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance)
{
	struct hci_cp_le_set_ext_adv_params cp;
	struct hci_dev *hdev = req->hdev;
	bool connectable;
	u32 flags;
	/* In ext adv set param interval is 3 octets */
	const u8 adv_interval[3] = { 0x00, 0x08, 0x00 };

	flags = get_adv_instance_flags(hdev, instance);

	/* If the "connectable" instance flag was not set, then choose between
	 * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
	 */
	connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
		      mgmt_get_connectable(hdev);

	if (!is_advertising_allowed(hdev, connectable))
		return -EPERM;

	memset(&cp, 0, sizeof(cp));

	memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval));
	memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval));

	if (connectable)
		cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND);
	else if (get_adv_instance_scan_rsp_len(hdev, instance))
		cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND);
	else
		cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND);

	cp.own_addr_type = BDADDR_LE_PUBLIC;
	cp.channel_map = hdev->le_adv_channel_map;
	cp.tx_power = 127;
	cp.primary_phy = HCI_ADV_PHY_1M;
	cp.secondary_phy = HCI_ADV_PHY_1M;
	cp.handle = 0;

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp);

	return 0;
}

void __hci_req_enable_ext_advertising(struct hci_request *req)
{
	struct hci_cp_le_set_ext_adv_enable *cp;
	struct hci_cp_ext_adv_set *adv_set;
	u8 data[sizeof(*cp) + sizeof(*adv_set) * 1];

	cp = (void *) data;
	adv_set = (void *) cp->data;

	memset(cp, 0, sizeof(*cp));

	cp->enable = 0x01;
	cp->num_of_sets = 0x01;

	memset(adv_set, 0, sizeof(*adv_set));

	adv_set->handle = 0;

	hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE,
		    sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets,
		    data);
}

int __hci_req_start_ext_adv(struct hci_request *req, u8 instance)
{
	int err;

	err = __hci_req_setup_ext_adv_instance(req, instance);
	if (err < 0)
		return err;

	__hci_req_update_scan_rsp_data(req, instance);
	__hci_req_enable_ext_advertising(req);

	return 0;
}

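/* Make @instance the current advertising instance, arm its expiry timer and
 * queue the commands needed to start advertising it (unless it is already
 * being advertised and @force is false).
 */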
int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance,
				    bool force)
{
	struct hci_dev *hdev = req->hdev;
	struct adv_info *adv_instance = NULL;
	u16 timeout;

	if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
	    list_empty(&hdev->adv_instances))
		return -EPERM;

	if (hdev->adv_instance_timeout)
		return -EBUSY;

	adv_instance = hci_find_adv_instance(hdev, instance);
	if (!adv_instance)
		return -ENOENT;

	/* A zero timeout means unlimited advertising. As long as there is
	 * only one instance, duration should be ignored. We still set a timeout
	 * in case further instances are being added later on.
	 *
	 * If the remaining lifetime of the instance is more than the duration
	 * then the timeout corresponds to the duration, otherwise it will be
	 * reduced to the remaining instance lifetime.
	 */
	if (adv_instance->timeout == 0 ||
	    adv_instance->duration <= adv_instance->remaining_time)
		timeout = adv_instance->duration;
	else
		timeout = adv_instance->remaining_time;

	/* The remaining time is being reduced unless the instance is being
	 * advertised without time limit.
	 */
	if (adv_instance->timeout)
		adv_instance->remaining_time =
				adv_instance->remaining_time - timeout;

	hdev->adv_instance_timeout = timeout;
	queue_delayed_work(hdev->req_workqueue,
			   &hdev->adv_instance_expire,
			   msecs_to_jiffies(timeout * 1000));

	/* If we're just re-scheduling the same instance again then do not
	 * execute any HCI commands. This happens when a single instance is
	 * being advertised.
	 */
	if (!force && hdev->cur_adv_instance == instance &&
	    hci_dev_test_flag(hdev, HCI_LE_ADV))
		return 0;

	hdev->cur_adv_instance = instance;
	if (ext_adv_capable(hdev)) {
		__hci_req_start_ext_adv(req, instance);
	} else {
		__hci_req_update_adv_data(req, instance);
		__hci_req_update_scan_rsp_data(req, instance);
		__hci_req_enable_advertising(req);
	}

	return 0;
}

static void cancel_adv_timeout(struct hci_dev *hdev)
{
	if (hdev->adv_instance_timeout) {
		hdev->adv_instance_timeout = 0;
		cancel_delayed_work(&hdev->adv_instance_expire);
	}
}

/* For a single instance:
 * - force == true: The instance will be removed even when its remaining
 *   lifetime is not zero.
 * - force == false: the instance will be deactivated but kept stored unless
 *   the remaining lifetime is zero.
 *
 * For instance == 0x00:
 * - force == true: All instances will be removed regardless of their timeout
 *   setting.
 * - force == false: Only instances that have a timeout will be removed.
 */
void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk,
				struct hci_request *req, u8 instance,
				bool force)
{
	struct adv_info *adv_instance, *n, *next_instance = NULL;
	int err;
	u8 rem_inst;

	/* Cancel any timeout concerning the removed instance(s). */
	if (!instance || hdev->cur_adv_instance == instance)
		cancel_adv_timeout(hdev);

	/* Get the next instance to advertise BEFORE we remove
	 * the current one. This can be the same instance again
	 * if there is only one instance.
	 */
	if (instance && hdev->cur_adv_instance == instance)
		next_instance = hci_get_next_instance(hdev, instance);

	if (instance == 0x00) {
		list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances,
					 list) {
			if (!(force || adv_instance->timeout))
				continue;

			rem_inst = adv_instance->instance;
			err = hci_remove_adv_instance(hdev, rem_inst);
			if (!err)
				mgmt_advertising_removed(sk, hdev, rem_inst);
		}
	} else {
		adv_instance = hci_find_adv_instance(hdev, instance);

		if (force || (adv_instance && adv_instance->timeout &&
			      !adv_instance->remaining_time)) {
			/* Don't advertise a removed instance. */
			if (next_instance &&
			    next_instance->instance == instance)
				next_instance = NULL;

			err = hci_remove_adv_instance(hdev, instance);
			if (!err)
				mgmt_advertising_removed(sk, hdev, instance);
		}
	}

	if (!req || !hdev_is_powered(hdev) ||
	    hci_dev_test_flag(hdev, HCI_ADVERTISING))
		return;

	if (next_instance)
		__hci_req_schedule_adv_instance(req, next_instance->instance,
						false);
}

Johan Hedberg0857dd32014-12-19 13:40:20 +02001652static void set_random_addr(struct hci_request *req, bdaddr_t *rpa)
1653{
1654 struct hci_dev *hdev = req->hdev;
1655
1656 /* If we're advertising or initiating an LE connection we can't
1657 * go ahead and change the random address at this time. This is
1658 * because the eventual initiator address used for the
1659 * subsequently created connection will be undefined (some
1660 * controllers use the new address and others the one we had
1661 * when the operation started).
1662 *
1663 * In this kind of scenario skip the update and let the random
1664 * address be updated at the next cycle.
1665 */
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001666 if (hci_dev_test_flag(hdev, HCI_LE_ADV) ||
Jakub Pawlowskie7d9ab72015-08-07 20:22:52 +02001667 hci_lookup_le_connect(hdev)) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001668 BT_DBG("Deferring random address update");
Marcel Holtmanna1536da2015-03-13 02:11:01 -07001669 hci_dev_set_flag(hdev, HCI_RPA_EXPIRED);
Johan Hedberg0857dd32014-12-19 13:40:20 +02001670 return;
1671 }
1672
1673 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa);
1674}
1675
1676int hci_update_random_address(struct hci_request *req, bool require_privacy,
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001677 bool use_rpa, u8 *own_addr_type)
Johan Hedberg0857dd32014-12-19 13:40:20 +02001678{
1679 struct hci_dev *hdev = req->hdev;
1680 int err;
1681
1682	/* If privacy is enabled, use a resolvable private address. If
1683	 * the current RPA has expired or something other than the
1684	 * current RPA is in use, then generate a new one.
1685 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001686 if (use_rpa) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001687 int to;
1688
1689 *own_addr_type = ADDR_LE_DEV_RANDOM;
1690
Marcel Holtmanna69d8922015-03-13 02:11:05 -07001691 if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) &&
Johan Hedberg0857dd32014-12-19 13:40:20 +02001692 !bacmp(&hdev->random_addr, &hdev->rpa))
1693 return 0;
1694
1695 err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa);
1696 if (err < 0) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01001697 bt_dev_err(hdev, "failed to generate new RPA");
Johan Hedberg0857dd32014-12-19 13:40:20 +02001698 return err;
1699 }
1700
1701 set_random_addr(req, &hdev->rpa);
1702
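		/* Re-arm the RPA expiry work so that the address is treated
		 * as expired, and a fresh RPA generated on the next update,
		 * after hdev->rpa_timeout seconds.
		 */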
1703 to = msecs_to_jiffies(hdev->rpa_timeout * 1000);
1704 queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to);
1705
1706 return 0;
1707 }
1708
1709	/* In case privacy is required without a resolvable private address,
1710	 * use a non-resolvable private address. This is useful for active
1711 * scanning and non-connectable advertising.
1712 */
1713 if (require_privacy) {
1714 bdaddr_t nrpa;
1715
1716 while (true) {
1717 /* The non-resolvable private address is generated
1718		 * from six random bytes with the two most significant
1719 * bits cleared.
1720 */
1721 get_random_bytes(&nrpa, 6);
1722 nrpa.b[5] &= 0x3f;
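			/* bdaddr_t is stored least significant byte first, so
			 * masking b[5] clears the two top bits of the most
			 * significant address byte.
			 */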
1723
1724 /* The non-resolvable private address shall not be
1725 * equal to the public address.
1726 */
1727 if (bacmp(&hdev->bdaddr, &nrpa))
1728 break;
1729 }
1730
1731 *own_addr_type = ADDR_LE_DEV_RANDOM;
1732 set_random_addr(req, &nrpa);
1733 return 0;
1734 }
1735
1736	/* If the forced static address setting is in use or there is no public
1737	 * address, use the static address as the random address (but skip
1738	 * the HCI command if the current random address is already the
1739	 * static one).
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001740 *
1741 * In case BR/EDR has been disabled on a dual-mode controller
1742 * and a static address has been configured, then use that
1743 * address instead of the public BR/EDR address.
Johan Hedberg0857dd32014-12-19 13:40:20 +02001744 */
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07001745 if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) ||
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001746 !bacmp(&hdev->bdaddr, BDADDR_ANY) ||
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001747 (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) &&
Marcel Holtmann50b5b952014-12-19 23:05:35 +01001748 bacmp(&hdev->static_addr, BDADDR_ANY))) {
Johan Hedberg0857dd32014-12-19 13:40:20 +02001749 *own_addr_type = ADDR_LE_DEV_RANDOM;
1750 if (bacmp(&hdev->static_addr, &hdev->random_addr))
1751 hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6,
1752 &hdev->static_addr);
1753 return 0;
1754 }
1755
1756 /* Neither privacy nor static address is being used so use a
1757 * public address.
1758 */
1759 *own_addr_type = ADDR_LE_DEV_PUBLIC;
1760
1761 return 0;
1762}
Johan Hedberg2cf22212014-12-19 22:26:00 +02001763
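/* Return true if the BR/EDR whitelist contains at least one device that is
 * not currently connected, in which case page scanning needs to stay enabled
 * so that such devices can reconnect.
 */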
Johan Hedberg405a2612014-12-19 23:18:22 +02001764static bool disconnected_whitelist_entries(struct hci_dev *hdev)
1765{
1766 struct bdaddr_list *b;
1767
1768 list_for_each_entry(b, &hdev->whitelist, list) {
1769 struct hci_conn *conn;
1770
1771 conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr);
1772 if (!conn)
1773 return true;
1774
1775 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
1776 return true;
1777 }
1778
1779 return false;
1780}
1781
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001782void __hci_req_update_scan(struct hci_request *req)
Johan Hedberg405a2612014-12-19 23:18:22 +02001783{
1784 struct hci_dev *hdev = req->hdev;
1785 u8 scan;
1786
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001787 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedberg405a2612014-12-19 23:18:22 +02001788 return;
1789
1790 if (!hdev_is_powered(hdev))
1791 return;
1792
1793 if (mgmt_powering_down(hdev))
1794 return;
1795
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001796 if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) ||
Johan Hedberg405a2612014-12-19 23:18:22 +02001797 disconnected_whitelist_entries(hdev))
1798 scan = SCAN_PAGE;
1799 else
1800 scan = SCAN_DISABLED;
1801
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07001802 if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
Johan Hedberg405a2612014-12-19 23:18:22 +02001803 scan |= SCAN_INQUIRY;
1804
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001805 if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) &&
1806 test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY))
1807 return;
1808
Johan Hedberg405a2612014-12-19 23:18:22 +02001809 hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
1810}
1811
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001812static int update_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg405a2612014-12-19 23:18:22 +02001813{
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001814 hci_dev_lock(req->hdev);
1815 __hci_req_update_scan(req);
1816 hci_dev_unlock(req->hdev);
1817 return 0;
1818}
Johan Hedberg405a2612014-12-19 23:18:22 +02001819
Johan Hedberg01b1cb82015-11-16 12:52:21 +02001820static void scan_update_work(struct work_struct *work)
1821{
1822 struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);
1823
1824 hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL);
Johan Hedberg405a2612014-12-19 23:18:22 +02001825}
1826
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001827static int connectable_update(struct hci_request *req, unsigned long opt)
1828{
1829 struct hci_dev *hdev = req->hdev;
1830
1831 hci_dev_lock(hdev);
1832
1833 __hci_req_update_scan(req);
1834
1835 /* If BR/EDR is not enabled and we disable advertising as a
1836 * by-product of disabling connectable, we need to update the
1837 * advertising flags.
1838 */
1839 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
Johan Hedbergcab054a2015-11-30 11:21:45 +02001840 __hci_req_update_adv_data(req, hdev->cur_adv_instance);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001841
1842 /* Update the advertising parameters if necessary */
1843 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301844 !list_empty(&hdev->adv_instances)) {
1845 if (ext_adv_capable(hdev))
1846 __hci_req_start_ext_adv(req, hdev->cur_adv_instance);
1847 else
1848 __hci_req_enable_advertising(req);
1849 }
Johan Hedberg53c0ba72015-11-22 16:43:43 +03001850
1851 __hci_update_background_scan(req);
1852
1853 hci_dev_unlock(hdev);
1854
1855 return 0;
1856}
1857
1858static void connectable_update_work(struct work_struct *work)
1859{
1860 struct hci_dev *hdev = container_of(work, struct hci_dev,
1861 connectable_update);
1862 u8 status;
1863
1864 hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status);
1865 mgmt_set_connectable_complete(hdev, status);
1866}
1867
Johan Hedberg14bf5ea2015-11-22 19:00:22 +02001868static u8 get_service_classes(struct hci_dev *hdev)
1869{
1870 struct bt_uuid *uuid;
1871 u8 val = 0;
1872
1873 list_for_each_entry(uuid, &hdev->uuids, list)
1874 val |= uuid->svc_hint;
1875
1876 return val;
1877}
1878
1879void __hci_req_update_class(struct hci_request *req)
1880{
1881 struct hci_dev *hdev = req->hdev;
1882 u8 cod[3];
1883
1884 BT_DBG("%s", hdev->name);
1885
1886 if (!hdev_is_powered(hdev))
1887 return;
1888
1889 if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
1890 return;
1891
1892 if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
1893 return;
1894
1895 cod[0] = hdev->minor_class;
1896 cod[1] = hdev->major_class;
1897 cod[2] = get_service_classes(hdev);
1898
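	/* Bit 5 of the middle CoD byte (CoD bit 13) is the Limited
	 * Discoverable Mode flag of the major service class field.
	 */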
1899 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
1900 cod[1] |= 0x20;
1901
1902 if (memcmp(cod, hdev->dev_class, 3) == 0)
1903 return;
1904
1905 hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod);
1906}
1907
Johan Hedbergaed1a882015-11-22 17:24:44 +03001908static void write_iac(struct hci_request *req)
1909{
1910 struct hci_dev *hdev = req->hdev;
1911 struct hci_cp_write_current_iac_lap cp;
1912
1913 if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
1914 return;
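	/* The IAC LAP values below are written least significant byte first:
	 * 0x9e8b00 is the Limited Inquiry Access Code (LIAC) and 0x9e8b33
	 * the General Inquiry Access Code (GIAC).
	 */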
1915
1916 if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) {
1917 /* Limited discoverable mode */
1918 cp.num_iac = min_t(u8, hdev->num_iac, 2);
1919 cp.iac_lap[0] = 0x00; /* LIAC */
1920 cp.iac_lap[1] = 0x8b;
1921 cp.iac_lap[2] = 0x9e;
1922 cp.iac_lap[3] = 0x33; /* GIAC */
1923 cp.iac_lap[4] = 0x8b;
1924 cp.iac_lap[5] = 0x9e;
1925 } else {
1926 /* General discoverable mode */
1927 cp.num_iac = 1;
1928 cp.iac_lap[0] = 0x33; /* GIAC */
1929 cp.iac_lap[1] = 0x8b;
1930 cp.iac_lap[2] = 0x9e;
1931 }
1932
1933 hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP,
1934 (cp.num_iac * 3) + 1, &cp);
1935}
1936
1937static int discoverable_update(struct hci_request *req, unsigned long opt)
1938{
1939 struct hci_dev *hdev = req->hdev;
1940
1941 hci_dev_lock(hdev);
1942
1943 if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) {
1944 write_iac(req);
1945 __hci_req_update_scan(req);
1946 __hci_req_update_class(req);
1947 }
1948
1949 /* Advertising instances don't use the global discoverable setting, so
1950 * only update AD if advertising was enabled using Set Advertising.
1951 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001952 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Johan Hedbergcab054a2015-11-30 11:21:45 +02001953 __hci_req_update_adv_data(req, 0x00);
Johan Hedbergaed1a882015-11-22 17:24:44 +03001954
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001955 /* Discoverable mode affects the local advertising
1956 * address in limited privacy mode.
1957 */
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05301958 if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) {
1959 if (ext_adv_capable(hdev))
1960 __hci_req_start_ext_adv(req, 0x00);
1961 else
1962 __hci_req_enable_advertising(req);
1963 }
Johan Hedberg82a37ad2016-03-09 17:30:34 +02001964 }
1965
Johan Hedbergaed1a882015-11-22 17:24:44 +03001966 hci_dev_unlock(hdev);
1967
1968 return 0;
1969}
1970
1971static void discoverable_update_work(struct work_struct *work)
1972{
1973 struct hci_dev *hdev = container_of(work, struct hci_dev,
1974 discoverable_update);
1975 u8 status;
1976
1977 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status);
1978 mgmt_set_discoverable_complete(hdev, status);
1979}
1980
Johan Hedbergdcc0f0d2015-10-22 10:49:37 +03001981void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn,
1982 u8 reason)
1983{
1984 switch (conn->state) {
1985 case BT_CONNECTED:
1986 case BT_CONFIG:
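		/* AMP physical links are torn down with the Disconnect
		 * Physical Link command; every other link type uses the
		 * regular HCI Disconnect command.
		 */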
1987 if (conn->type == AMP_LINK) {
1988 struct hci_cp_disconn_phy_link cp;
1989
1990 cp.phy_handle = HCI_PHY_HANDLE(conn->handle);
1991 cp.reason = reason;
1992 hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp),
1993 &cp);
1994 } else {
1995 struct hci_cp_disconnect dc;
1996
1997 dc.handle = cpu_to_le16(conn->handle);
1998 dc.reason = reason;
1999 hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc);
2000 }
2001
2002 conn->state = BT_DISCONN;
2003
2004 break;
2005 case BT_CONNECT:
2006 if (conn->type == LE_LINK) {
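			/* A connection attempt that is still in the passive
			 * scanning phase has no LE Create Connection command
			 * pending in the controller, so there is nothing to
			 * cancel.
			 */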
2007 if (test_bit(HCI_CONN_SCANNING, &conn->flags))
2008 break;
2009 hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL,
2010 0, NULL);
2011 } else if (conn->type == ACL_LINK) {
2012 if (req->hdev->hci_ver < BLUETOOTH_VER_1_2)
2013 break;
2014 hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL,
2015 6, &conn->dst);
2016 }
2017 break;
2018 case BT_CONNECT2:
2019 if (conn->type == ACL_LINK) {
2020 struct hci_cp_reject_conn_req rej;
2021
2022 bacpy(&rej.bdaddr, &conn->dst);
2023 rej.reason = reason;
2024
2025 hci_req_add(req, HCI_OP_REJECT_CONN_REQ,
2026 sizeof(rej), &rej);
2027 } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) {
2028 struct hci_cp_reject_sync_conn_req rej;
2029
2030 bacpy(&rej.bdaddr, &conn->dst);
2031
2032 /* SCO rejection has its own limited set of
2033 * allowed error values (0x0D-0x0F) which isn't
2034 * compatible with most values passed to this
2035			 * function. To be safe, hard-code one of the
2036 * values that's suitable for SCO.
2037 */
Frédéric Dalleau3c0975a2016-09-08 12:00:11 +02002038 rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES;
Johan Hedbergdcc0f0d2015-10-22 10:49:37 +03002039
2040 hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ,
2041 sizeof(rej), &rej);
2042 }
2043 break;
2044 default:
2045 conn->state = BT_CLOSED;
2046 break;
2047 }
2048}
2049
2050static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode)
2051{
2052 if (status)
2053 BT_DBG("Failed to abort connection: status 0x%2.2x", status);
2054}
2055
2056int hci_abort_conn(struct hci_conn *conn, u8 reason)
2057{
2058 struct hci_request req;
2059 int err;
2060
2061 hci_req_init(&req, conn->hdev);
2062
2063 __hci_abort_conn(&req, conn, reason);
2064
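	/* hci_req_run() returns -ENODATA when no commands were queued, e.g.
	 * because the connection state required no HCI traffic at all; that
	 * is not treated as a failure here.
	 */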
2065 err = hci_req_run(&req, abort_conn_complete);
2066 if (err && err != -ENODATA) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002067 bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err);
Johan Hedbergdcc0f0d2015-10-22 10:49:37 +03002068 return err;
2069 }
2070
2071 return 0;
2072}
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002073
Johan Hedberga1d01db2015-11-11 08:11:25 +02002074static int update_bg_scan(struct hci_request *req, unsigned long opt)
Johan Hedberg2e93e532015-11-11 08:11:17 +02002075{
2076 hci_dev_lock(req->hdev);
2077 __hci_update_background_scan(req);
2078 hci_dev_unlock(req->hdev);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002079 return 0;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002080}
2081
2082static void bg_scan_update(struct work_struct *work)
2083{
2084 struct hci_dev *hdev = container_of(work, struct hci_dev,
2085 bg_scan_update);
Johan Hedberg84235d22015-11-11 08:11:20 +02002086 struct hci_conn *conn;
2087 u8 status;
2088 int err;
Johan Hedberg2e93e532015-11-11 08:11:17 +02002089
Johan Hedberg84235d22015-11-11 08:11:20 +02002090 err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status);
2091 if (!err)
2092 return;
2093
2094 hci_dev_lock(hdev);
2095
2096 conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT);
2097 if (conn)
2098 hci_le_conn_failed(conn, status);
2099
2100 hci_dev_unlock(hdev);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002101}
2102
Johan Hedberga1d01db2015-11-11 08:11:25 +02002103static int le_scan_disable(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002104{
2105 hci_req_add_le_scan_disable(req);
Johan Hedberga1d01db2015-11-11 08:11:25 +02002106 return 0;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002107}
2108
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002109static int bredr_inquiry(struct hci_request *req, unsigned long opt)
2110{
2111 u8 length = opt;
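	/* LAPs of the General and Limited Inquiry Access Codes (0x9e8b33 and
	 * 0x9e8b00) in least significant byte first order.
	 */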
Johan Hedberg78b781c2016-01-05 13:19:32 +02002112 const u8 giac[3] = { 0x33, 0x8b, 0x9e };
2113 const u8 liac[3] = { 0x00, 0x8b, 0x9e };
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002114 struct hci_cp_inquiry cp;
2115
2116 BT_DBG("%s", req->hdev->name);
2117
2118 hci_dev_lock(req->hdev);
2119 hci_inquiry_cache_flush(req->hdev);
2120 hci_dev_unlock(req->hdev);
2121
2122 memset(&cp, 0, sizeof(cp));
Johan Hedberg78b781c2016-01-05 13:19:32 +02002123
2124 if (req->hdev->discovery.limited)
2125 memcpy(&cp.lap, liac, sizeof(cp.lap));
2126 else
2127 memcpy(&cp.lap, giac, sizeof(cp.lap));
2128
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002129 cp.length = length;
2130
2131 hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
2132
2133 return 0;
2134}
2135
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002136static void le_scan_disable_work(struct work_struct *work)
2137{
2138 struct hci_dev *hdev = container_of(work, struct hci_dev,
2139 le_scan_disable.work);
2140 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002141
2142 BT_DBG("%s", hdev->name);
2143
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002144 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002145 return;
2146
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002147 cancel_delayed_work(&hdev->le_scan_restart);
2148
2149 hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status);
2150 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002151 bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x",
2152 status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002153 return;
2154 }
2155
2156 hdev->discovery.scan_start = 0;
2157
2158	/* If we were running an LE-only scan, change the discovery state.
2159	 * If we were running both LE and BR/EDR inquiry simultaneously,
2160	 * and BR/EDR inquiry is already finished, stop discovery,
2161	 * otherwise BR/EDR inquiry will stop discovery when finished.
2162	 * If we are about to resolve a remote device name, do not change
2163	 * the discovery state.
2164 */
2165
2166 if (hdev->discovery.type == DISCOV_TYPE_LE)
2167 goto discov_stopped;
2168
2169 if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED)
2170 return;
2171
2172 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) {
2173 if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
2174 hdev->discovery.state != DISCOVERY_RESOLVING)
2175 goto discov_stopped;
2176
2177 return;
2178 }
2179
2180 hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN,
2181 HCI_CMD_TIMEOUT, &status);
2182 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002183 bt_dev_err(hdev, "inquiry failed: status 0x%02x", status);
Johan Hedbergf4a2cb42015-11-11 12:24:22 +02002184 goto discov_stopped;
2185 }
2186
2187 return;
2188
2189discov_stopped:
2190 hci_dev_lock(hdev);
2191 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2192 hci_dev_unlock(hdev);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002193}
2194
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002195static int le_scan_restart(struct hci_request *req, unsigned long opt)
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002196{
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002197 struct hci_dev *hdev = req->hdev;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002198
2199 /* If controller is not scanning we are done. */
2200 if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
2201 return 0;
2202
2203 hci_req_add_le_scan_disable(req);
2204
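	/* Re-enable scanning with the previously programmed parameters, using
	 * the extended scan enable command on controllers that support
	 * extended scanning and the legacy command otherwise.
	 */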
Jaganath Kanakkasserya2344b92018-07-06 17:05:28 +05302205 if (use_ext_scan(hdev)) {
2206 struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
2207
2208 memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
2209 ext_enable_cp.enable = LE_SCAN_ENABLE;
2210 ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2211
2212 hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
2213 sizeof(ext_enable_cp), &ext_enable_cp);
2214 } else {
2215 struct hci_cp_le_set_scan_enable cp;
2216
2217 memset(&cp, 0, sizeof(cp));
2218 cp.enable = LE_SCAN_ENABLE;
2219 cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
2220 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
2221 }
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002222
2223 return 0;
2224}
2225
2226static void le_scan_restart_work(struct work_struct *work)
2227{
2228 struct hci_dev *hdev = container_of(work, struct hci_dev,
2229 le_scan_restart.work);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002230 unsigned long timeout, duration, scan_start, now;
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002231 u8 status;
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002232
2233 BT_DBG("%s", hdev->name);
2234
Johan Hedberg3dfe5902015-11-11 12:24:23 +02002235 hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002236 if (status) {
Marcel Holtmann2064ee32017-10-30 10:42:59 +01002237 bt_dev_err(hdev, "failed to restart LE scan: status %d",
2238 status);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002239 return;
2240 }
2241
2242 hci_dev_lock(hdev);
2243
2244 if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) ||
2245 !hdev->discovery.scan_start)
2246 goto unlock;
2247
2248	/* When the scan was started, hdev->le_scan_disable was queued to
2249	 * run 'duration' after scan_start. During the scan restart this
2250	 * work has been canceled, so queue it again with the remaining
2251	 * timeout to make sure that the scan does not run indefinitely.
2252 */
2253 duration = hdev->discovery.scan_duration;
2254 scan_start = hdev->discovery.scan_start;
2255 now = jiffies;
2256 if (now - scan_start <= duration) {
2257 int elapsed;
2258
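		/* Account for a possible jiffies wrap-around between
		 * scan_start and now.
		 */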
2259 if (now >= scan_start)
2260 elapsed = now - scan_start;
2261 else
2262 elapsed = ULONG_MAX - scan_start + now;
2263
2264 timeout = duration - elapsed;
2265 } else {
2266 timeout = 0;
2267 }
2268
2269 queue_delayed_work(hdev->req_workqueue,
2270 &hdev->le_scan_disable, timeout);
2271
2272unlock:
2273 hci_dev_unlock(hdev);
2274}
2275
Johan Hedberge68f0722015-11-11 08:30:30 +02002276static int active_scan(struct hci_request *req, unsigned long opt)
2277{
2278 uint16_t interval = opt;
2279 struct hci_dev *hdev = req->hdev;
Johan Hedberge68f0722015-11-11 08:30:30 +02002280 u8 own_addr_type;
2281 int err;
2282
2283 BT_DBG("%s", hdev->name);
2284
2285 if (hci_dev_test_flag(hdev, HCI_LE_ADV)) {
2286 hci_dev_lock(hdev);
2287
2288 /* Don't let discovery abort an outgoing connection attempt
2289 * that's using directed advertising.
2290 */
2291 if (hci_lookup_le_connect(hdev)) {
2292 hci_dev_unlock(hdev);
2293 return -EBUSY;
2294 }
2295
2296 cancel_adv_timeout(hdev);
2297 hci_dev_unlock(hdev);
2298
Jaganath Kanakkassery94386b62017-12-11 20:26:47 +05302299 __hci_req_disable_advertising(req);
Johan Hedberge68f0722015-11-11 08:30:30 +02002300 }
2301
2302 /* If controller is scanning, it means the background scanning is
2303 * running. Thus, we should temporarily stop it in order to set the
2304 * discovery scanning parameters.
2305 */
2306 if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
2307 hci_req_add_le_scan_disable(req);
2308
2309 /* All active scans will be done with either a resolvable private
2310	 * address (when the privacy feature has been enabled) or a non-resolvable
2311 * private address.
2312 */
Johan Hedberg82a37ad2016-03-09 17:30:34 +02002313 err = hci_update_random_address(req, true, scan_use_rpa(hdev),
2314 &own_addr_type);
Johan Hedberge68f0722015-11-11 08:30:30 +02002315 if (err < 0)
2316 own_addr_type = ADDR_LE_DEV_PUBLIC;
2317
Jaganath Kanakkassery3baef812018-07-06 17:05:27 +05302318 hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN,
2319 own_addr_type, 0);
Johan Hedberge68f0722015-11-11 08:30:30 +02002320 return 0;
2321}
2322
2323static int interleaved_discov(struct hci_request *req, unsigned long opt)
2324{
2325 int err;
2326
2327 BT_DBG("%s", req->hdev->name);
2328
2329 err = active_scan(req, opt);
2330 if (err)
2331 return err;
2332
Johan Hedberg7df26b52015-11-11 12:24:21 +02002333 return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN);
Johan Hedberge68f0722015-11-11 08:30:30 +02002334}
2335
2336static void start_discovery(struct hci_dev *hdev, u8 *status)
2337{
2338 unsigned long timeout;
2339
2340 BT_DBG("%s type %u", hdev->name, hdev->discovery.type);
2341
2342 switch (hdev->discovery.type) {
2343 case DISCOV_TYPE_BREDR:
2344 if (!hci_dev_test_flag(hdev, HCI_INQUIRY))
Johan Hedberg7df26b52015-11-11 12:24:21 +02002345 hci_req_sync(hdev, bredr_inquiry,
2346 DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT,
Johan Hedberge68f0722015-11-11 08:30:30 +02002347 status);
2348 return;
2349 case DISCOV_TYPE_INTERLEAVED:
2350 /* When running simultaneous discovery, the LE scanning time
2351		 * should occupy the whole discovery time since BR/EDR inquiry
2352 * and LE scanning are scheduled by the controller.
2353 *
2354 * For interleaving discovery in comparison, BR/EDR inquiry
2355 * and LE scanning are done sequentially with separate
2356 * timeouts.
2357 */
2358 if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY,
2359 &hdev->quirks)) {
2360 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2361 /* During simultaneous discovery, we double LE scan
2362 * interval. We must leave some time for the controller
2363 * to do BR/EDR inquiry.
2364 */
2365 hci_req_sync(hdev, interleaved_discov,
2366 DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT,
2367 status);
2368 break;
2369 }
2370
2371 timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout);
2372 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2373 HCI_CMD_TIMEOUT, status);
2374 break;
2375 case DISCOV_TYPE_LE:
2376 timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT);
2377 hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT,
2378 HCI_CMD_TIMEOUT, status);
2379 break;
2380 default:
2381 *status = HCI_ERROR_UNSPECIFIED;
2382 return;
2383 }
2384
2385 if (*status)
2386 return;
2387
2388 BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout));
2389
2390 /* When service discovery is used and the controller has a
2391 * strict duplicate filter, it is important to remember the
2392 * start and duration of the scan. This is required for
2393 * restarting scanning during the discovery phase.
2394 */
2395 if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) &&
2396 hdev->discovery.result_filtering) {
2397 hdev->discovery.scan_start = jiffies;
2398 hdev->discovery.scan_duration = timeout;
2399 }
2400
2401 queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable,
2402 timeout);
2403}
2404
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002405bool hci_req_stop_discovery(struct hci_request *req)
2406{
2407 struct hci_dev *hdev = req->hdev;
2408 struct discovery_state *d = &hdev->discovery;
2409 struct hci_cp_remote_name_req_cancel cp;
2410 struct inquiry_entry *e;
2411 bool ret = false;
2412
2413 BT_DBG("%s state %u", hdev->name, hdev->discovery.state);
2414
2415 if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) {
2416 if (test_bit(HCI_INQUIRY, &hdev->flags))
2417 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2418
2419 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2420 cancel_delayed_work(&hdev->le_scan_disable);
2421 hci_req_add_le_scan_disable(req);
2422 }
2423
2424 ret = true;
2425 } else {
2426 /* Passive scanning */
2427 if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) {
2428 hci_req_add_le_scan_disable(req);
2429 ret = true;
2430 }
2431 }
2432
2433 /* No further actions needed for LE-only discovery */
2434 if (d->type == DISCOV_TYPE_LE)
2435 return ret;
2436
2437 if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) {
2438 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
2439 NAME_PENDING);
2440 if (!e)
2441 return ret;
2442
2443 bacpy(&cp.bdaddr, &e->data.bdaddr);
2444 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
2445 &cp);
2446 ret = true;
2447 }
2448
2449 return ret;
2450}
2451
2452static int stop_discovery(struct hci_request *req, unsigned long opt)
2453{
2454 hci_dev_lock(req->hdev);
2455 hci_req_stop_discovery(req);
2456 hci_dev_unlock(req->hdev);
2457
2458 return 0;
2459}
2460
Johan Hedberge68f0722015-11-11 08:30:30 +02002461static void discov_update(struct work_struct *work)
2462{
2463 struct hci_dev *hdev = container_of(work, struct hci_dev,
2464 discov_update);
2465 u8 status = 0;
2466
2467 switch (hdev->discovery.state) {
2468 case DISCOVERY_STARTING:
2469 start_discovery(hdev, &status);
2470 mgmt_start_discovery_complete(hdev, status);
2471 if (status)
2472 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2473 else
2474 hci_discovery_set_state(hdev, DISCOVERY_FINDING);
2475 break;
Johan Hedberg2154d3f2015-11-11 08:30:45 +02002476 case DISCOVERY_STOPPING:
2477 hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status);
2478 mgmt_stop_discovery_complete(hdev, status);
2479 if (!status)
2480 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
2481 break;
Johan Hedberge68f0722015-11-11 08:30:30 +02002482 case DISCOVERY_STOPPED:
2483 default:
2484 return;
2485 }
2486}
2487
Johan Hedbergc366f552015-11-23 15:43:06 +02002488static void discov_off(struct work_struct *work)
2489{
2490 struct hci_dev *hdev = container_of(work, struct hci_dev,
2491 discov_off.work);
2492
2493 BT_DBG("%s", hdev->name);
2494
2495 hci_dev_lock(hdev);
2496
2497	/* When the discoverable timeout triggers, just make sure
2498 * the limited discoverable flag is cleared. Even in the case
2499 * of a timeout triggered from general discoverable, it is
2500 * safe to unconditionally clear the flag.
2501 */
2502 hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE);
2503 hci_dev_clear_flag(hdev, HCI_DISCOVERABLE);
2504 hdev->discov_timeout = 0;
2505
2506 hci_dev_unlock(hdev);
2507
2508 hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL);
2509 mgmt_new_settings(hdev);
2510}
2511
Johan Hedberg2ff13892015-11-25 16:15:44 +02002512static int powered_update_hci(struct hci_request *req, unsigned long opt)
2513{
2514 struct hci_dev *hdev = req->hdev;
Johan Hedberg2ff13892015-11-25 16:15:44 +02002515 u8 link_sec;
2516
2517 hci_dev_lock(hdev);
2518
2519 if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) &&
2520 !lmp_host_ssp_capable(hdev)) {
2521 u8 mode = 0x01;
2522
2523 hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode);
2524
2525 if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) {
2526 u8 support = 0x01;
2527
2528 hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT,
2529 sizeof(support), &support);
2530 }
2531 }
2532
2533 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) &&
2534 lmp_bredr_capable(hdev)) {
2535 struct hci_cp_write_le_host_supported cp;
2536
2537 cp.le = 0x01;
2538 cp.simul = 0x00;
2539
2540 /* Check first if we already have the right
2541 * host state (host features set)
2542 */
2543 if (cp.le != lmp_host_le_capable(hdev) ||
2544 cp.simul != lmp_host_le_br_capable(hdev))
2545 hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED,
2546 sizeof(cp), &cp);
2547 }
2548
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002549 if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) {
Johan Hedberg2ff13892015-11-25 16:15:44 +02002550 /* Make sure the controller has a good default for
2551 * advertising data. This also applies to the case
2552 * where BR/EDR was toggled during the AUTO_OFF phase.
2553 */
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002554 if (hci_dev_test_flag(hdev, HCI_ADVERTISING) ||
2555 list_empty(&hdev->adv_instances)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302556 int err;
2557
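			/* With extended advertising the advertising set has
			 * to be set up before any data can be written, so
			 * track the setup result and only enable advertising
			 * below when the setup succeeded.
			 */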
2558 if (ext_adv_capable(hdev)) {
2559 err = __hci_req_setup_ext_adv_instance(req,
2560 0x00);
2561 if (!err)
2562 __hci_req_update_scan_rsp_data(req,
2563 0x00);
2564 } else {
2565 err = 0;
2566 __hci_req_update_adv_data(req, 0x00);
2567 __hci_req_update_scan_rsp_data(req, 0x00);
2568 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002569
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302570 if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) {
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302571 if (!ext_adv_capable(hdev))
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302572 __hci_req_enable_advertising(req);
Jaganath Kanakkasserya0fb3722018-07-19 17:09:42 +05302573 else if (!err)
2574 __hci_req_enable_ext_advertising(req);
Jaganath Kanakkasseryde181e82018-07-19 17:09:41 +05302575 }
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002576 } else if (!list_empty(&hdev->adv_instances)) {
2577 struct adv_info *adv_instance;
2578
Johan Hedberg2ff13892015-11-25 16:15:44 +02002579 adv_instance = list_first_entry(&hdev->adv_instances,
2580 struct adv_info, list);
Johan Hedberg2ff13892015-11-25 16:15:44 +02002581 __hci_req_schedule_adv_instance(req,
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002582 adv_instance->instance,
Johan Hedberg2ff13892015-11-25 16:15:44 +02002583 true);
Johan Hedbergd6b7e2c2015-11-30 11:21:44 +02002584 }
Johan Hedberg2ff13892015-11-25 16:15:44 +02002585 }
2586
2587 link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY);
2588 if (link_sec != test_bit(HCI_AUTH, &hdev->flags))
2589 hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE,
2590 sizeof(link_sec), &link_sec);
2591
2592 if (lmp_bredr_capable(hdev)) {
2593 if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE))
2594 __hci_req_write_fast_connectable(req, true);
2595 else
2596 __hci_req_write_fast_connectable(req, false);
2597 __hci_req_update_scan(req);
2598 __hci_req_update_class(req);
2599 __hci_req_update_name(req);
2600 __hci_req_update_eir(req);
2601 }
2602
2603 hci_dev_unlock(hdev);
2604 return 0;
2605}
2606
2607int __hci_req_hci_power_on(struct hci_dev *hdev)
2608{
2609 /* Register the available SMP channels (BR/EDR and LE) only when
2610 * successfully powering on the controller. This late
2611 * registration is required so that LE SMP can clearly decide if
2612 * the public address or static address is used.
2613 */
2614 smp_register(hdev);
2615
2616 return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT,
2617 NULL);
2618}
2619
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002620void hci_request_setup(struct hci_dev *hdev)
2621{
Johan Hedberge68f0722015-11-11 08:30:30 +02002622 INIT_WORK(&hdev->discov_update, discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002623 INIT_WORK(&hdev->bg_scan_update, bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002624 INIT_WORK(&hdev->scan_update, scan_update_work);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002625 INIT_WORK(&hdev->connectable_update, connectable_update_work);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002626 INIT_WORK(&hdev->discoverable_update, discoverable_update_work);
Johan Hedbergc366f552015-11-23 15:43:06 +02002627 INIT_DELAYED_WORK(&hdev->discov_off, discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002628 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2629 INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work);
Johan Hedbergf2252572015-11-18 12:49:20 +02002630 INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire);
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002631}
2632
2633void hci_request_cancel_all(struct hci_dev *hdev)
2634{
Johan Hedberg7df0f732015-11-12 15:15:00 +02002635 hci_req_sync_cancel(hdev, ENODEV);
2636
Johan Hedberge68f0722015-11-11 08:30:30 +02002637 cancel_work_sync(&hdev->discov_update);
Johan Hedberg2e93e532015-11-11 08:11:17 +02002638 cancel_work_sync(&hdev->bg_scan_update);
Johan Hedberg01b1cb82015-11-16 12:52:21 +02002639 cancel_work_sync(&hdev->scan_update);
Johan Hedberg53c0ba72015-11-22 16:43:43 +03002640 cancel_work_sync(&hdev->connectable_update);
Johan Hedbergaed1a882015-11-22 17:24:44 +03002641 cancel_work_sync(&hdev->discoverable_update);
Johan Hedbergc366f552015-11-23 15:43:06 +02002642 cancel_delayed_work_sync(&hdev->discov_off);
Johan Hedberg7c1fbed2015-11-11 08:11:23 +02002643 cancel_delayed_work_sync(&hdev->le_scan_disable);
2644 cancel_delayed_work_sync(&hdev->le_scan_restart);
Johan Hedbergf2252572015-11-18 12:49:20 +02002645
2646 if (hdev->adv_instance_timeout) {
2647 cancel_delayed_work_sync(&hdev->adv_instance_expire);
2648 hdev->adv_instance_timeout = 0;
2649 }
Johan Hedberg5fc16cc2015-11-11 08:11:16 +02002650}