/*
   BlueZ - Bluetooth protocol stack for Linux

   Copyright (C) 2014 Intel Corporation

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

#include <linux/sched/signal.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/mgmt.h>

#include "smp.h"
#include "hci_request.h"

#define HCI_REQ_DONE      0
#define HCI_REQ_PEND      1
#define HCI_REQ_CANCELED  2

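/* A minimal usage sketch (illustrative only, not part of the original
 * file): callers stack-allocate a request, queue one or more commands
 * on it, and then hand the whole batch to the command work queue. A
 * completion callback is optional.
 *
 *        struct hci_request req;
 *
 *        hci_req_init(&req, hdev);
 *        hci_req_add(&req, HCI_OP_RESET, 0, NULL);
 *        hci_req_run(&req, NULL);
 *
 * HCI_OP_RESET is just an example opcode here; any command the
 * controller supports can be queued the same way.
 */
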
void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
{
        skb_queue_head_init(&req->cmd_q);
        req->hdev = hdev;
        req->err = 0;
}

void hci_req_purge(struct hci_request *req)
{
        skb_queue_purge(&req->cmd_q);
}

static int req_run(struct hci_request *req, hci_req_complete_t complete,
                   hci_req_complete_skb_t complete_skb)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;
        unsigned long flags;

        BT_DBG("length %u", skb_queue_len(&req->cmd_q));

        /* If an error occurred during request building, remove all HCI
         * commands queued on the HCI request queue.
         */
        if (req->err) {
                skb_queue_purge(&req->cmd_q);
                return req->err;
        }

        /* Do not allow empty requests */
        if (skb_queue_empty(&req->cmd_q))
                return -ENODATA;

        skb = skb_peek_tail(&req->cmd_q);
        if (complete) {
                bt_cb(skb)->hci.req_complete = complete;
        } else if (complete_skb) {
                bt_cb(skb)->hci.req_complete_skb = complete_skb;
                bt_cb(skb)->hci.req_flags |= HCI_REQ_SKB;
        }

        spin_lock_irqsave(&hdev->cmd_q.lock, flags);
        skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
        spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

        queue_work(hdev->workqueue, &hdev->cmd_work);

        return 0;
}

int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
        return req_run(req, complete, NULL);
}

int hci_req_run_skb(struct hci_request *req, hci_req_complete_skb_t complete)
{
        return req_run(req, NULL, complete);
}

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result, u16 opcode,
                                  struct sk_buff *skb)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                if (skb)
                        hdev->req_skb = skb_get(skb);
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

void hci_req_sync_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

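/* Illustrative only: a synchronous command round-trip built on the
 * helper below could look like this in a hypothetical caller (the
 * opcode and timeout constants exist in hci.h; the surrounding code
 * is an assumption, not part of this file):
 *
 *        struct sk_buff *skb;
 *
 *        skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
 *                             HCI_INIT_TIMEOUT);
 *        if (IS_ERR(skb))
 *                return PTR_ERR(skb);
 *        ... parse skb->data, e.g. as struct hci_rp_read_local_version ...
 *        kfree_skb(skb);
 */
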
struct sk_buff *__hci_cmd_sync_ev(struct hci_dev *hdev, u16 opcode, u32 plen,
                                  const void *param, u8 event, u32 timeout)
{
        struct hci_request req;
        struct sk_buff *skb;
        int err = 0;

        BT_DBG("%s", hdev->name);

        hci_req_init(&req, hdev);

        hci_req_add_ev(&req, opcode, plen, param, event);

        hdev->req_status = HCI_REQ_PEND;

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0)
                return ERR_PTR(err);

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                                               hdev->req_status != HCI_REQ_PEND,
                                               timeout);

        if (err == -ERESTARTSYS)
                return ERR_PTR(-EINTR);

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;
        skb = hdev->req_skb;
        hdev->req_skb = NULL;

        BT_DBG("%s end: err %d", hdev->name, err);

        if (err < 0) {
                kfree_skb(skb);
                return ERR_PTR(err);
        }

        if (!skb)
                return ERR_PTR(-ENODATA);

        return skb;
}
EXPORT_SYMBOL(__hci_cmd_sync_ev);

struct sk_buff *__hci_cmd_sync(struct hci_dev *hdev, u16 opcode, u32 plen,
                               const void *param, u32 timeout)
{
        return __hci_cmd_sync_ev(hdev, opcode, plen, param, 0, timeout);
}
EXPORT_SYMBOL(__hci_cmd_sync);

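/* Illustrative only: __hci_req_sync() below consumes a request-builder
 * callback of this shape. The builder name and body here are
 * hypothetical; hci_req_add_le_scan_disable() is defined later in this
 * file and HCI_CMD_TIMEOUT exists in hci.h:
 *
 *        static int le_scan_disable(struct hci_request *req,
 *                                   unsigned long opt)
 *        {
 *                hci_req_add_le_scan_disable(req);
 *                return 0;
 *        }
 *
 *        err = hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT,
 *                           &status);
 */
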
/* Execute request and wait for completion. */
int __hci_req_sync(struct hci_dev *hdev, int (*func)(struct hci_request *req,
                                                     unsigned long opt),
                   unsigned long opt, u32 timeout, u8 *hci_status)
{
        struct hci_request req;
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hci_req_init(&req, hdev);

        hdev->req_status = HCI_REQ_PEND;

        err = func(&req, opt);
        if (err) {
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                return err;
        }

        err = hci_req_run_skb(&req, hci_req_sync_complete);
        if (err < 0) {
                hdev->req_status = 0;

                /* ENODATA means the HCI request command queue is empty.
                 * This can happen when a request with conditionals doesn't
                 * trigger any commands to be sent. This is normal behavior
                 * and should not trigger an error return.
                 */
                if (err == -ENODATA) {
                        if (hci_status)
                                *hci_status = 0;
                        return 0;
                }

                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;

                return err;
        }

        err = wait_event_interruptible_timeout(hdev->req_wait_q,
                                               hdev->req_status != HCI_REQ_PEND,
                                               timeout);

        if (err == -ERESTARTSYS)
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                if (hci_status)
                        *hci_status = hdev->req_result;
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;

        default:
                err = -ETIMEDOUT;
                if (hci_status)
                        *hci_status = HCI_ERROR_UNSPECIFIED;
                break;
        }

        kfree_skb(hdev->req_skb);
        hdev->req_skb = NULL;
        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

int hci_req_sync(struct hci_dev *hdev, int (*req)(struct hci_request *req,
                                                  unsigned long opt),
                 unsigned long opt, u32 timeout, u8 *hci_status)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_sync_lock(hdev);
        ret = __hci_req_sync(hdev, req, opt, timeout, hci_status);
        hci_req_sync_unlock(hdev);

        return ret;
}

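/* An HCI command packet on the wire is a 3-byte header (a little-endian
 * 16-bit opcode followed by an 8-bit parameter length) and then plen
 * bytes of parameters; hci_prepare_cmd() below builds exactly that into
 * a freshly allocated skb.
 */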
struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode, u32 plen,
                                const void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return NULL;

        hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                skb_put_data(skb, param, plen);

        BT_DBG("skb len %d", skb->len);

        hci_skb_pkt_type(skb) = HCI_COMMAND_PKT;
        hci_skb_opcode(skb) = opcode;

        return skb;
}

/* Queue a command to an asynchronous HCI request */
void hci_req_add_ev(struct hci_request *req, u16 opcode, u32 plen,
                    const void *param, u8 event)
{
        struct hci_dev *hdev = req->hdev;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

        /* If an error occurred during request building, there is no point in
         * queueing the HCI command. We can simply return.
         */
        if (req->err)
                return;

        skb = hci_prepare_cmd(hdev, opcode, plen, param);
        if (!skb) {
                bt_dev_err(hdev, "no memory for command (opcode 0x%4.4x)",
                           opcode);
                req->err = -ENOMEM;
                return;
        }

        if (skb_queue_empty(&req->cmd_q))
                bt_cb(skb)->hci.req_flags |= HCI_REQ_START;

        bt_cb(skb)->hci.req_event = event;

        skb_queue_tail(&req->cmd_q, skb);
}

void hci_req_add(struct hci_request *req, u16 opcode, u32 plen,
                 const void *param)
{
        hci_req_add_ev(req, opcode, plen, param, 0);
}

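/* Page scan interval and window below are expressed in 0.625 ms slots,
 * so 0x0100 = 160 ms, 0x0800 = 1.28 s and 0x0012 = 11.25 ms.
 */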
void __hci_req_write_fast_connectable(struct hci_request *req, bool enable)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_page_scan_activity acp;
        u8 type;

        if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED))
                return;

        if (hdev->hci_ver < BLUETOOTH_VER_1_2)
                return;

        if (enable) {
                type = PAGE_SCAN_TYPE_INTERLACED;

                /* 160 msec page scan interval */
                acp.interval = cpu_to_le16(0x0100);
        } else {
                type = PAGE_SCAN_TYPE_STANDARD; /* default */

                /* default 1.28 sec page scan */
                acp.interval = cpu_to_le16(0x0800);
        }

        acp.window = cpu_to_le16(0x0012);

        if (__cpu_to_le16(hdev->page_scan_interval) != acp.interval ||
            __cpu_to_le16(hdev->page_scan_window) != acp.window)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_ACTIVITY,
                            sizeof(acp), &acp);

        if (hdev->page_scan_type != type)
                hci_req_add(req, HCI_OP_WRITE_PAGE_SCAN_TYPE, 1, &type);
}

/* This function controls background scanning based on the
 * hdev->pend_le_conns list. If there are pending LE connections we
 * start background scanning, otherwise we stop it.
 *
 * This function requires that the caller holds hdev->lock.
 */
static void __hci_update_background_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (!test_bit(HCI_UP, &hdev->flags) ||
            test_bit(HCI_INIT, &hdev->flags) ||
            hci_dev_test_flag(hdev, HCI_SETUP) ||
            hci_dev_test_flag(hdev, HCI_CONFIG) ||
            hci_dev_test_flag(hdev, HCI_AUTO_OFF) ||
            hci_dev_test_flag(hdev, HCI_UNREGISTER))
                return;

        /* No point in doing scanning if LE support hasn't been enabled */
        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        /* If discovery is active don't interfere with it */
        if (hdev->discovery.state != DISCOVERY_STOPPED)
                return;

        /* Reset RSSI and UUID filters when starting background scanning
         * since these filters are meant for service discovery only.
         *
         * The Start Discovery and Start Service Discovery operations
         * ensure to set proper values for RSSI threshold and UUID
         * filter list. So it is safe to just reset them here.
         */
        hci_discovery_filter_clear(hdev);

        if (list_empty(&hdev->pend_le_conns) &&
            list_empty(&hdev->pend_le_reports)) {
                /* If there are no pending LE connections and no devices
                 * to be scanned for, we should stop the background
                 * scanning.
                 */

                /* If controller is not scanning we are done. */
                if (!hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        return;

                hci_req_add_le_scan_disable(req);

                BT_DBG("%s stopping background scanning", hdev->name);
        } else {
                /* If there is at least one pending LE connection, we should
                 * keep the background scan running.
                 */

                /* If controller is connecting, we should not start scanning
                 * since some controllers are not able to scan and connect at
                 * the same time.
                 */
                if (hci_lookup_le_connect(hdev))
                        return;

                /* If controller is currently scanning, we stop it to ensure we
                 * don't miss any advertising (due to duplicates filter).
                 */
                if (hci_dev_test_flag(hdev, HCI_LE_SCAN))
                        hci_req_add_le_scan_disable(req);

                hci_req_add_le_passive_scan(req);

                BT_DBG("%s starting background scanning", hdev->name);
        }
}

void __hci_req_update_name(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_local_name cp;

        memcpy(cp.name, hdev->dev_name, sizeof(cp.name));

        hci_req_add(req, HCI_OP_WRITE_LOCAL_NAME, sizeof(cp), &cp);
}

#define PNP_INFO_SVCLASS_ID             0x1200

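/* EIR/AD fields share a TLV-style layout: a length byte that covers the
 * type byte plus the payload, then the type byte, then the payload
 * itself. That is why the UUID list builders below start each list with
 * a length of 1 (the type byte only, no UUIDs yet) and grow the length
 * byte as entries are appended.
 */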
static u8 *create_uuid16_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 4)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                u16 uuid16;

                if (uuid->size != 16)
                        continue;

                uuid16 = get_unaligned_le16(&uuid->uuid[12]);
                if (uuid16 < 0x1100)
                        continue;

                if (uuid16 == PNP_INFO_SVCLASS_ID)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID16_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u16) > len) {
                        uuids_start[1] = EIR_UUID16_SOME;
                        break;
                }

                *ptr++ = (uuid16 & 0x00ff);
                *ptr++ = (uuid16 & 0xff00) >> 8;
                uuids_start[0] += sizeof(uuid16);
        }

        return ptr;
}

static u8 *create_uuid32_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 6)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 32)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID32_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + sizeof(u32) > len) {
                        uuids_start[1] = EIR_UUID32_SOME;
                        break;
                }

                memcpy(ptr, &uuid->uuid[12], sizeof(u32));
                ptr += sizeof(u32);
                uuids_start[0] += sizeof(u32);
        }

        return ptr;
}

static u8 *create_uuid128_list(struct hci_dev *hdev, u8 *data, ptrdiff_t len)
{
        u8 *ptr = data, *uuids_start = NULL;
        struct bt_uuid *uuid;

        if (len < 18)
                return ptr;

        list_for_each_entry(uuid, &hdev->uuids, list) {
                if (uuid->size != 128)
                        continue;

                if (!uuids_start) {
                        uuids_start = ptr;
                        uuids_start[0] = 1;
                        uuids_start[1] = EIR_UUID128_ALL;
                        ptr += 2;
                }

                /* Stop if not enough space to put next UUID */
                if ((ptr - data) + 16 > len) {
                        uuids_start[1] = EIR_UUID128_SOME;
                        break;
                }

                memcpy(ptr, uuid->uuid, 16);
                ptr += 16;
                uuids_start[0] += 16;
        }

        return ptr;
}

static void create_eir(struct hci_dev *hdev, u8 *data)
{
        u8 *ptr = data;
        size_t name_len;

        name_len = strlen(hdev->dev_name);

        if (name_len > 0) {
                /* EIR Data type */
                if (name_len > 48) {
                        name_len = 48;
                        ptr[1] = EIR_NAME_SHORT;
                } else
                        ptr[1] = EIR_NAME_COMPLETE;

                /* EIR Data length */
                ptr[0] = name_len + 1;

                memcpy(ptr + 2, hdev->dev_name, name_len);

                ptr += (name_len + 2);
        }

        if (hdev->inq_tx_power != HCI_TX_POWER_INVALID) {
                ptr[0] = 2;
                ptr[1] = EIR_TX_POWER;
                ptr[2] = (u8) hdev->inq_tx_power;

                ptr += 3;
        }

        if (hdev->devid_source > 0) {
                ptr[0] = 9;
                ptr[1] = EIR_DEVICE_ID;

                put_unaligned_le16(hdev->devid_source, ptr + 2);
                put_unaligned_le16(hdev->devid_vendor, ptr + 4);
                put_unaligned_le16(hdev->devid_product, ptr + 6);
                put_unaligned_le16(hdev->devid_version, ptr + 8);

                ptr += 10;
        }

        ptr = create_uuid16_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid32_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
        ptr = create_uuid128_list(hdev, ptr, HCI_MAX_EIR_LENGTH - (ptr - data));
}

void __hci_req_update_eir(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_write_eir cp;

        if (!hdev_is_powered(hdev))
                return;

        if (!lmp_ext_inq_capable(hdev))
                return;

        if (!hci_dev_test_flag(hdev, HCI_SSP_ENABLED))
                return;

        if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE))
                return;

        memset(&cp, 0, sizeof(cp));

        create_eir(hdev, cp.data);

        if (memcmp(cp.data, hdev->eir, sizeof(cp.data)) == 0)
                return;

        memcpy(hdev->eir, cp.data, sizeof(cp.data));

        hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
}

void hci_req_add_le_scan_disable(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;

        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_enable cp;

                memset(&cp, 0, sizeof(cp));
                cp.enable = LE_SCAN_DISABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
        }
}

static void add_to_white_list(struct hci_request *req,
                              struct hci_conn_params *params)
{
        struct hci_cp_le_add_to_white_list cp;

        cp.bdaddr_type = params->addr_type;
        bacpy(&cp.bdaddr, &params->addr);

        hci_req_add(req, HCI_OP_LE_ADD_TO_WHITE_LIST, sizeof(cp), &cp);
}

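/* Sync the controller's white list with pend_le_conns/pend_le_reports
 * and pick the scan filter policy: returns 0x01 when the white list can
 * be used, and 0x00 (accept all advertising) when it cannot, e.g. when
 * a tracked device uses RPAs or the list would overflow.
 */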
static u8 update_white_list(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_conn_params *params;
        struct bdaddr_list *b;
        uint8_t white_list_entries = 0;

        /* Go through the current white list programmed into the
         * controller one by one and check if that address is still
         * in the list of pending connections or list of devices to
         * report. If not present in either list, then queue the
         * command to remove it from the controller.
         */
        list_for_each_entry(b, &hdev->le_white_list, list) {
                /* If the device is neither in pend_le_conns nor
                 * pend_le_reports then remove it from the whitelist.
                 */
                if (!hci_pend_le_action_lookup(&hdev->pend_le_conns,
                                               &b->bdaddr, b->bdaddr_type) &&
                    !hci_pend_le_action_lookup(&hdev->pend_le_reports,
                                               &b->bdaddr, b->bdaddr_type)) {
                        struct hci_cp_le_del_from_white_list cp;

                        cp.bdaddr_type = b->bdaddr_type;
                        bacpy(&cp.bdaddr, &b->bdaddr);

                        hci_req_add(req, HCI_OP_LE_DEL_FROM_WHITE_LIST,
                                    sizeof(cp), &cp);
                        continue;
                }

                if (hci_find_irk_by_addr(hdev, &b->bdaddr, b->bdaddr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
        }

        /* Since all no longer valid white list entries have been
         * removed, walk through the list of pending connections
         * and ensure that any new device gets programmed into
         * the controller.
         *
         * If the list of the devices is larger than the list of
         * available white list entries in the controller, then
         * just abort and return a filter policy value that does not
         * use the white list.
         */
        list_for_each_entry(params, &hdev->pend_le_conns, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* After adding all new pending connections, walk through
         * the list of pending reports and also add these to the
         * white list if there is still space.
         */
        list_for_each_entry(params, &hdev->pend_le_reports, action) {
                if (hci_bdaddr_list_lookup(&hdev->le_white_list,
                                           &params->addr, params->addr_type))
                        continue;

                if (white_list_entries >= hdev->le_white_list_size) {
                        /* Select filter policy to accept all advertising */
                        return 0x00;
                }

                if (hci_find_irk_by_addr(hdev, &params->addr,
                                         params->addr_type)) {
                        /* White list cannot be used with RPAs */
                        return 0x00;
                }

                white_list_entries++;
                add_to_white_list(req, params);
        }

        /* Select filter policy to use white list */
        return 0x01;
}

static bool scan_use_rpa(struct hci_dev *hdev)
{
        return hci_dev_test_flag(hdev, HCI_PRIVACY);
}

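/* With extended scanning the parameter block is variable-length: a
 * fixed header followed by one hci_cp_le_scan_phy_params entry per PHY
 * bit set in scanning_phys. The on-stack data[] buffer below therefore
 * reserves room for at most two PHYs (1M and Coded).
 */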
static void hci_req_start_scan(struct hci_request *req, u8 type, u16 interval,
                               u16 window, u8 own_addr_type, u8 filter_policy)
{
        struct hci_dev *hdev = req->hdev;

        /* Use extended scanning if the controller supports both the LE
         * Set Extended Scan Parameters and LE Set Extended Scan Enable
         * commands.
         */
        if (use_ext_scan(hdev)) {
                struct hci_cp_le_set_ext_scan_params *ext_param_cp;
                struct hci_cp_le_set_ext_scan_enable ext_enable_cp;
                struct hci_cp_le_scan_phy_params *phy_params;
                u8 data[sizeof(*ext_param_cp) + sizeof(*phy_params) * 2];
                u32 plen;

                ext_param_cp = (void *)data;
                phy_params = (void *)ext_param_cp->data;

                memset(ext_param_cp, 0, sizeof(*ext_param_cp));
                ext_param_cp->own_addr_type = own_addr_type;
                ext_param_cp->filter_policy = filter_policy;

                plen = sizeof(*ext_param_cp);

                if (scan_1m(hdev) || scan_2m(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_1M;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                if (scan_coded(hdev)) {
                        ext_param_cp->scanning_phys |= LE_SCAN_PHY_CODED;

                        memset(phy_params, 0, sizeof(*phy_params));
                        phy_params->type = type;
                        phy_params->interval = cpu_to_le16(interval);
                        phy_params->window = cpu_to_le16(window);

                        plen += sizeof(*phy_params);
                        phy_params++;
                }

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_PARAMS,
                            plen, ext_param_cp);

                memset(&ext_enable_cp, 0, sizeof(ext_enable_cp));
                ext_enable_cp.enable = LE_SCAN_ENABLE;
                ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE,
                            sizeof(ext_enable_cp), &ext_enable_cp);
        } else {
                struct hci_cp_le_set_scan_param param_cp;
                struct hci_cp_le_set_scan_enable enable_cp;

                memset(&param_cp, 0, sizeof(param_cp));
                param_cp.type = type;
                param_cp.interval = cpu_to_le16(interval);
                param_cp.window = cpu_to_le16(window);
                param_cp.own_address_type = own_addr_type;
                param_cp.filter_policy = filter_policy;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(param_cp),
                            &param_cp);

                memset(&enable_cp, 0, sizeof(enable_cp));
                enable_cp.enable = LE_SCAN_ENABLE;
                enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE;
                hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(enable_cp),
                            &enable_cp);
        }
}

void hci_req_add_le_passive_scan(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        u8 own_addr_type;
        u8 filter_policy;

        /* Set require_privacy to false since no SCAN_REQ PDUs are sent
         * during passive scanning. Not using a non-resolvable address
         * here is important so that peer devices using direct
         * advertising with our address will be correctly reported
         * by the controller.
         */
        if (hci_update_random_address(req, false, scan_use_rpa(hdev),
                                      &own_addr_type))
                return;

        /* Adding or removing entries from the white list must
         * happen before enabling scanning. The controller does
         * not allow white list modification while scanning.
         */
        filter_policy = update_white_list(req);

        /* When the controller is using random resolvable addresses and
         * with that having LE privacy enabled, then controllers with
         * Extended Scanner Filter Policies support can now enable support
         * for handling directed advertising.
         *
         * So instead of using filter policies 0x00 (no whitelist)
         * and 0x01 (whitelist enabled) use the new filter policies
         * 0x02 (no whitelist) and 0x03 (whitelist enabled).
         */
        if (hci_dev_test_flag(hdev, HCI_PRIVACY) &&
            (hdev->le_features[0] & HCI_LE_EXT_SCAN_POLICY))
                filter_policy |= 0x02;

        hci_req_start_scan(req, LE_SCAN_PASSIVE, hdev->le_scan_interval,
                           hdev->le_scan_window, own_addr_type, filter_policy);
}

static u8 get_adv_instance_scan_rsp_len(struct hci_dev *hdev, u8 instance)
{
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

static u8 get_cur_adv_instance_scan_rsp_len(struct hci_dev *hdev)
{
        u8 instance = hdev->cur_adv_instance;
        struct adv_info *adv_instance;

        /* Ignore instance 0 */
        if (instance == 0x00)
                return 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        /* TODO: Take into account the "appearance" and "local-name" flags here.
         * These are currently being ignored as they are not supported.
         */
        return adv_instance->scan_rsp_len;
}

void __hci_req_disable_advertising(struct hci_request *req)
{
        if (ext_adv_capable(req->hdev)) {
                struct hci_cp_le_set_ext_adv_enable cp;

                cp.enable = 0x00;
                /* Disable all sets since we only support one set at the
                 * moment.
                 */
                cp.num_of_sets = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, sizeof(cp),
                            &cp);
        } else {
                u8 enable = 0x00;

                hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable),
                            &enable);
        }
}

static u32 get_adv_instance_flags(struct hci_dev *hdev, u8 instance)
{
        u32 flags;
        struct adv_info *adv_instance;

        if (instance == 0x00) {
                /* Instance 0 always manages the "Tx Power" and "Flags"
                 * fields
                 */
                flags = MGMT_ADV_FLAG_TX_POWER | MGMT_ADV_FLAG_MANAGED_FLAGS;

                /* For instance 0, the HCI_ADVERTISING_CONNECTABLE setting
                 * corresponds to the "connectable" instance flag.
                 */
                if (hci_dev_test_flag(hdev, HCI_ADVERTISING_CONNECTABLE))
                        flags |= MGMT_ADV_FLAG_CONNECTABLE;

                if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_LIMITED_DISCOV;
                else if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE))
                        flags |= MGMT_ADV_FLAG_DISCOV;

                return flags;
        }

        adv_instance = hci_find_adv_instance(hdev, instance);

        /* Return 0 when we got an invalid instance identifier. */
        if (!adv_instance)
                return 0;

        return adv_instance->flags;
}

static bool adv_use_rpa(struct hci_dev *hdev, uint32_t flags)
{
        /* If privacy is not enabled don't use RPA */
        if (!hci_dev_test_flag(hdev, HCI_PRIVACY))
                return false;

        /* If basic privacy mode is enabled use RPA */
        if (!hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY))
                return true;

        /* If limited privacy mode is enabled don't use RPA if we're
         * both discoverable and bondable.
         */
        if ((flags & MGMT_ADV_FLAG_DISCOV) &&
            hci_dev_test_flag(hdev, HCI_BONDABLE))
                return false;

        /* We're neither bondable nor discoverable in the limited
         * privacy mode, therefore use RPA.
         */
        return true;
}

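/* hdev->le_states holds the LE supported-states mask reported by the
 * controller (HCI Read LE Supported States) as an array of bytes; the
 * bit numbers mentioned in the comments below refer to positions within
 * that mask, which encodes which state combinations (e.g. slave role
 * plus connectable advertising) the controller can handle concurrently.
 */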
static bool is_advertising_allowed(struct hci_dev *hdev, bool connectable)
{
        /* If there is no connection we are OK to advertise. */
        if (hci_conn_num(hdev, LE_LINK) == 0)
                return true;

        /* Check le_states if there is any connection in slave role. */
        if (hdev->conn_hash.le_num_slave > 0) {
                /* Slave connection state and non connectable mode bit 20. */
                if (!connectable && !(hdev->le_states[2] & 0x10))
                        return false;

                /* Slave connection state and connectable mode bit 38
                 * and scannable bit 21.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x40) ||
                                    !(hdev->le_states[2] & 0x20)))
                        return false;
        }

        /* Check le_states if there is any connection in master role. */
        if (hci_conn_num(hdev, LE_LINK) != hdev->conn_hash.le_num_slave) {
                /* Master connection state and non connectable mode bit 18. */
                if (!connectable && !(hdev->le_states[2] & 0x02))
                        return false;

                /* Master connection state and connectable mode bit 35 and
                 * scannable 19.
                 */
                if (connectable && (!(hdev->le_states[4] & 0x08) ||
                                    !(hdev->le_states[2] & 0x08)))
                        return false;
        }

        return true;
}

void __hci_req_enable_advertising(struct hci_request *req)
{
        struct hci_dev *hdev = req->hdev;
        struct hci_cp_le_set_adv_param cp;
        u8 own_addr_type, enable = 0x01;
        bool connectable;
        u32 flags;

        flags = get_adv_instance_flags(hdev, hdev->cur_adv_instance);

        /* If the "connectable" instance flag was not set, then choose between
         * ADV_IND and ADV_NONCONN_IND based on the global connectable setting.
         */
        connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) ||
                      mgmt_get_connectable(hdev);

        if (!is_advertising_allowed(hdev, connectable))
                return;

        if (hci_dev_test_flag(hdev, HCI_LE_ADV))
                __hci_req_disable_advertising(req);

        /* Clear the HCI_LE_ADV bit temporarily so that the
         * hci_update_random_address knows that it's safe to go ahead
         * and write a new random address. The flag will be set back on
         * as soon as the SET_ADV_ENABLE HCI command completes.
         */
        hci_dev_clear_flag(hdev, HCI_LE_ADV);

        /* Set require_privacy to true only when non-connectable
         * advertising is used. In that case it is fine to use a
         * non-resolvable private address.
         */
        if (hci_update_random_address(req, !connectable,
                                      adv_use_rpa(hdev, flags),
                                      &own_addr_type) < 0)
                return;

        memset(&cp, 0, sizeof(cp));
        cp.min_interval = cpu_to_le16(hdev->le_adv_min_interval);
        cp.max_interval = cpu_to_le16(hdev->le_adv_max_interval);

        if (connectable)
                cp.type = LE_ADV_IND;
        else if (get_cur_adv_instance_scan_rsp_len(hdev))
                cp.type = LE_ADV_SCAN_IND;
        else
                cp.type = LE_ADV_NONCONN_IND;

        cp.own_address_type = own_addr_type;
        cp.channel_map = hdev->le_adv_channel_map;

        hci_req_add(req, HCI_OP_LE_SET_ADV_PARAM, sizeof(cp), &cp);

        hci_req_add(req, HCI_OP_LE_SET_ADV_ENABLE, sizeof(enable), &enable);
}

u8 append_local_name(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        size_t short_len;
        size_t complete_len;

        /* no space left for name (+ NULL + type + len) */
        if ((HCI_MAX_AD_LENGTH - ad_len) < HCI_MAX_SHORT_NAME_LENGTH + 3)
                return ad_len;

        /* use complete name if present and fits */
        complete_len = strlen(hdev->dev_name);
        if (complete_len && complete_len <= HCI_MAX_SHORT_NAME_LENGTH)
                return eir_append_data(ptr, ad_len, EIR_NAME_COMPLETE,
                                       hdev->dev_name, complete_len + 1);

        /* use short name if present */
        short_len = strlen(hdev->short_name);
        if (short_len)
                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT,
                                       hdev->short_name, short_len + 1);

        /* use shortened full name if present, we already know that name
         * is longer than HCI_MAX_SHORT_NAME_LENGTH
         */
        if (complete_len) {
                u8 name[HCI_MAX_SHORT_NAME_LENGTH + 1];

                memcpy(name, hdev->dev_name, HCI_MAX_SHORT_NAME_LENGTH);
                name[HCI_MAX_SHORT_NAME_LENGTH] = '\0';

                return eir_append_data(ptr, ad_len, EIR_NAME_SHORT, name,
                                       sizeof(name));
        }

        return ad_len;
}

static u8 append_appearance(struct hci_dev *hdev, u8 *ptr, u8 ad_len)
{
        return eir_append_le16(ptr, ad_len, EIR_APPEARANCE, hdev->appearance);
}

static u8 create_default_scan_rsp_data(struct hci_dev *hdev, u8 *ptr)
{
        u8 scan_rsp_len = 0;

        if (hdev->appearance)
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

        return append_local_name(hdev, ptr, scan_rsp_len);
}

static u8 create_instance_scan_rsp_data(struct hci_dev *hdev, u8 instance,
                                        u8 *ptr)
{
        struct adv_info *adv_instance;
        u32 instance_flags;
        u8 scan_rsp_len = 0;

        adv_instance = hci_find_adv_instance(hdev, instance);
        if (!adv_instance)
                return 0;

        instance_flags = adv_instance->flags;

        if ((instance_flags & MGMT_ADV_FLAG_APPEARANCE) && hdev->appearance)
                scan_rsp_len = append_appearance(hdev, ptr, scan_rsp_len);

        memcpy(&ptr[scan_rsp_len], adv_instance->scan_rsp_data,
               adv_instance->scan_rsp_len);

        scan_rsp_len += adv_instance->scan_rsp_len;

        if (instance_flags & MGMT_ADV_FLAG_LOCAL_NAME)
                scan_rsp_len = append_local_name(hdev, ptr, scan_rsp_len);

        return scan_rsp_len;
}

void __hci_req_update_scan_rsp_data(struct hci_request *req, u8 instance)
{
        struct hci_dev *hdev = req->hdev;
        u8 len;

        if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED))
                return;

        if (ext_adv_capable(hdev)) {
                struct hci_cp_le_set_ext_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                if (instance)
                        len = create_instance_scan_rsp_data(hdev, instance,
                                                            cp.data);
                else
                        len = create_default_scan_rsp_data(hdev, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.handle = 0;
                cp.length = len;
                cp.operation = LE_SET_ADV_DATA_OP_COMPLETE;
                cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG;

                hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_RSP_DATA, sizeof(cp),
                            &cp);
        } else {
                struct hci_cp_le_set_scan_rsp_data cp;

                memset(&cp, 0, sizeof(cp));

                if (instance)
                        len = create_instance_scan_rsp_data(hdev, instance,
                                                            cp.data);
                else
                        len = create_default_scan_rsp_data(hdev, cp.data);

                if (hdev->scan_rsp_data_len == len &&
                    !memcmp(cp.data, hdev->scan_rsp_data, len))
                        return;

                memcpy(hdev->scan_rsp_data, cp.data, sizeof(cp.data));
                hdev->scan_rsp_data_len = len;

                cp.length = len;

                hci_req_add(req, HCI_OP_LE_SET_SCAN_RSP_DATA, sizeof(cp), &cp);
        }
}

static u8 create_instance_adv_data(struct hci_dev *hdev, u8 instance, u8 *ptr)
|  | 1242 | { | 
|  | 1243 | struct adv_info *adv_instance = NULL; | 
|  | 1244 | u8 ad_len = 0, flags = 0; | 
|  | 1245 | u32 instance_flags; | 
|  | 1246 |  | 
|  | 1247 | /* Return 0 when the current instance identifier is invalid. */ | 
|  | 1248 | if (instance) { | 
|  | 1249 | adv_instance = hci_find_adv_instance(hdev, instance); | 
|  | 1250 | if (!adv_instance) | 
|  | 1251 | return 0; | 
|  | 1252 | } | 
|  | 1253 |  | 
|  | 1254 | instance_flags = get_adv_instance_flags(hdev, instance); | 
|  | 1255 |  | 
|  | 1256 | /* The Add Advertising command allows userspace to set both the general | 
|  | 1257 | * and limited discoverable flags. | 
|  | 1258 | */ | 
|  | 1259 | if (instance_flags & MGMT_ADV_FLAG_DISCOV) | 
|  | 1260 | flags |= LE_AD_GENERAL; | 
|  | 1261 |  | 
|  | 1262 | if (instance_flags & MGMT_ADV_FLAG_LIMITED_DISCOV) | 
|  | 1263 | flags |= LE_AD_LIMITED; | 
|  | 1264 |  | 
| Johan Hedberg | f18ba58 | 2016-04-06 13:09:05 +0300 | [diff] [blame] | 1265 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) | 
|  | 1266 | flags |= LE_AD_NO_BREDR; | 
|  | 1267 |  | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1268 | if (flags || (instance_flags & MGMT_ADV_FLAG_MANAGED_FLAGS)) { | 
|  | 1269 | /* If a discovery flag wasn't provided, simply use the global | 
|  | 1270 | * settings. | 
|  | 1271 | */ | 
|  | 1272 | if (!flags) | 
|  | 1273 | flags |= mgmt_get_adv_discov_flags(hdev); | 
|  | 1274 |  | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1275 | /* If flags would still be empty, then there is no need to | 
|  | 1276 | * include the "Flags" AD field. | 
|  | 1277 | */ | 
|  | 1278 | if (flags) { | 
|  | 1279 | ptr[0] = 0x02; | 
|  | 1280 | ptr[1] = EIR_FLAGS; | 
|  | 1281 | ptr[2] = flags; | 
|  | 1282 |  | 
|  | 1283 | ad_len += 3; | 
|  | 1284 | ptr += 3; | 
|  | 1285 | } | 
|  | 1286 | } | 
|  | 1287 |  | 
|  | 1288 | if (adv_instance) { | 
|  | 1289 | memcpy(ptr, adv_instance->adv_data, | 
|  | 1290 | adv_instance->adv_data_len); | 
|  | 1291 | ad_len += adv_instance->adv_data_len; | 
|  | 1292 | ptr += adv_instance->adv_data_len; | 
|  | 1293 | } | 
|  | 1294 |  | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 1295 | if (instance_flags & MGMT_ADV_FLAG_TX_POWER) { | 
|  | 1296 | s8 adv_tx_power; | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1297 |  | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 1298 | if (ext_adv_capable(hdev)) { | 
|  | 1299 | if (adv_instance) | 
|  | 1300 | adv_tx_power = adv_instance->tx_power; | 
|  | 1301 | else | 
|  | 1302 | adv_tx_power = hdev->adv_tx_power; | 
|  | 1303 | } else { | 
|  | 1304 | adv_tx_power = hdev->adv_tx_power; | 
|  | 1305 | } | 
|  | 1306 |  | 
|  | 1307 | /* Provide Tx Power only if we can provide a valid value for it */ | 
|  | 1308 | if (adv_tx_power != HCI_TX_POWER_INVALID) { | 
|  | 1309 | ptr[0] = 0x02; | 
|  | 1310 | ptr[1] = EIR_TX_POWER; | 
|  | 1311 | ptr[2] = (u8)adv_tx_power; | 
|  | 1312 |  | 
|  | 1313 | ad_len += 3; | 
|  | 1314 | ptr += 3; | 
|  | 1315 | } | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1316 | } | 
|  | 1317 |  | 
|  | 1318 | return ad_len; | 
|  | 1319 | } | 
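The inline three-octet fields above (Flags, TX Power) follow the same [length][type][value] shape, which is why each one advances ptr and ad_len by 3. A standalone sketch of that packing, assuming the spec-assigned type values EIR_FLAGS = 0x01 and EIR_TX_POWER = 0x0a and an example TX power of -8 dBm:

    #include <stdint.h>
    #include <stdio.h>

    #define EIR_FLAGS      0x01 /* spec-assigned AD type values */
    #define EIR_TX_POWER   0x0a
    #define LE_AD_GENERAL  0x02 /* spec-defined Flags bits */
    #define LE_AD_NO_BREDR 0x04

    int main(void)
    {
        uint8_t ad[31];
        uint8_t len = 0;
        int8_t tx_power = -8;   /* example dBm value */

        /* Flags field: length 0x02 = type octet + one value octet */
        ad[len++] = 0x02;
        ad[len++] = EIR_FLAGS;
        ad[len++] = LE_AD_GENERAL | LE_AD_NO_BREDR;

        /* TX Power field: same shape, value is a signed dBm octet */
        ad[len++] = 0x02;
        ad[len++] = EIR_TX_POWER;
        ad[len++] = (uint8_t)tx_power;

        for (uint8_t i = 0; i < len; i++)
            printf("%02x ", ad[i]);
        printf("\n");
        return 0;
    }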
|  | 1320 |  | 
| Johan Hedberg | cab054a | 2015-11-30 11:21:45 +0200 | [diff] [blame] | 1321 | void __hci_req_update_adv_data(struct hci_request *req, u8 instance) | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1322 | { | 
|  | 1323 | struct hci_dev *hdev = req->hdev; | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1324 | u8 len; | 
|  | 1325 |  | 
|  | 1326 | if (!hci_dev_test_flag(hdev, HCI_LE_ENABLED)) | 
|  | 1327 | return; | 
|  | 1328 |  | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 1329 | if (ext_adv_capable(hdev)) { | 
|  | 1330 | struct hci_cp_le_set_ext_adv_data cp; | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1331 |  | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 1332 | memset(&cp, 0, sizeof(cp)); | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1333 |  | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 1334 | len = create_instance_adv_data(hdev, instance, cp.data); | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1335 |  | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 1336 | /* There's nothing to do if the data hasn't changed */ | 
|  | 1337 | if (hdev->adv_data_len == len && | 
|  | 1338 | memcmp(cp.data, hdev->adv_data, len) == 0) | 
|  | 1339 | return; | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1340 |  | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 1341 | memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); | 
|  | 1342 | hdev->adv_data_len = len; | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1343 |  | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 1344 | cp.length = len; | 
|  | 1345 | cp.handle = 0; | 
|  | 1346 | cp.operation = LE_SET_ADV_DATA_OP_COMPLETE; | 
|  | 1347 | cp.frag_pref = LE_SET_ADV_DATA_NO_FRAG; | 
|  | 1348 |  | 
|  | 1349 | hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_DATA, sizeof(cp), &cp); | 
|  | 1350 | } else { | 
|  | 1351 | struct hci_cp_le_set_adv_data cp; | 
|  | 1352 |  | 
|  | 1353 | memset(&cp, 0, sizeof(cp)); | 
|  | 1354 |  | 
|  | 1355 | len = create_instance_adv_data(hdev, instance, cp.data); | 
|  | 1356 |  | 
|  | 1357 | /* There's nothing to do if the data hasn't changed */ | 
|  | 1358 | if (hdev->adv_data_len == len && | 
|  | 1359 | memcmp(cp.data, hdev->adv_data, len) == 0) | 
|  | 1360 | return; | 
|  | 1361 |  | 
|  | 1362 | memcpy(hdev->adv_data, cp.data, sizeof(cp.data)); | 
|  | 1363 | hdev->adv_data_len = len; | 
|  | 1364 |  | 
|  | 1365 | cp.length = len; | 
|  | 1366 |  | 
|  | 1367 | hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp); | 
|  | 1368 | } | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1369 | } | 
|  | 1370 |  | 
| Johan Hedberg | cab054a | 2015-11-30 11:21:45 +0200 | [diff] [blame] | 1371 | int hci_req_update_adv_data(struct hci_dev *hdev, u8 instance) | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1372 | { | 
|  | 1373 | struct hci_request req; | 
|  | 1374 |  | 
|  | 1375 | hci_req_init(&req, hdev); | 
|  | 1376 | __hci_req_update_adv_data(&req, instance); | 
|  | 1377 |  | 
|  | 1378 | return hci_req_run(&req, NULL); | 
|  | 1379 | } | 
|  | 1380 |  | 
|  | 1381 | static void adv_enable_complete(struct hci_dev *hdev, u8 status, u16 opcode) | 
|  | 1382 | { | 
|  | 1383 | BT_DBG("%s status %u", hdev->name, status); | 
|  | 1384 | } | 
|  | 1385 |  | 
|  | 1386 | void hci_req_reenable_advertising(struct hci_dev *hdev) | 
|  | 1387 | { | 
|  | 1388 | struct hci_request req; | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1389 |  | 
|  | 1390 | if (!hci_dev_test_flag(hdev, HCI_ADVERTISING) && | 
| Johan Hedberg | 17fd08f | 2015-11-26 12:15:59 +0200 | [diff] [blame] | 1391 | list_empty(&hdev->adv_instances)) | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1392 | return; | 
|  | 1393 |  | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1394 | hci_req_init(&req, hdev); | 
|  | 1395 |  | 
| Johan Hedberg | cab054a | 2015-11-30 11:21:45 +0200 | [diff] [blame] | 1396 | if (hdev->cur_adv_instance) { | 
|  | 1397 | __hci_req_schedule_adv_instance(&req, hdev->cur_adv_instance, | 
|  | 1398 | true); | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1399 | } else { | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 1400 | if (ext_adv_capable(hdev)) { | 
|  | 1401 | __hci_req_start_ext_adv(&req, 0x00); | 
|  | 1402 | } else { | 
|  | 1403 | __hci_req_update_adv_data(&req, 0x00); | 
|  | 1404 | __hci_req_update_scan_rsp_data(&req, 0x00); | 
|  | 1405 | __hci_req_enable_advertising(&req); | 
|  | 1406 | } | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1407 | } | 
|  | 1408 |  | 
|  | 1409 | hci_req_run(&req, adv_enable_complete); | 
|  | 1410 | } | 
|  | 1411 |  | 
|  | 1412 | static void adv_timeout_expire(struct work_struct *work) | 
|  | 1413 | { | 
|  | 1414 | struct hci_dev *hdev = container_of(work, struct hci_dev, | 
|  | 1415 | adv_instance_expire.work); | 
|  | 1416 |  | 
|  | 1417 | struct hci_request req; | 
|  | 1418 | u8 instance; | 
|  | 1419 |  | 
|  | 1420 | BT_DBG("%s", hdev->name); | 
|  | 1421 |  | 
|  | 1422 | hci_dev_lock(hdev); | 
|  | 1423 |  | 
|  | 1424 | hdev->adv_instance_timeout = 0; | 
|  | 1425 |  | 
| Johan Hedberg | cab054a | 2015-11-30 11:21:45 +0200 | [diff] [blame] | 1426 | instance = hdev->cur_adv_instance; | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1427 | if (instance == 0x00) | 
|  | 1428 | goto unlock; | 
|  | 1429 |  | 
|  | 1430 | hci_req_init(&req, hdev); | 
|  | 1431 |  | 
| Johan Hedberg | 37d3a1f | 2016-08-28 20:53:34 +0300 | [diff] [blame] | 1432 | hci_req_clear_adv_instance(hdev, NULL, &req, instance, false); | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1433 |  | 
|  | 1434 | if (list_empty(&hdev->adv_instances)) | 
|  | 1435 | __hci_req_disable_advertising(&req); | 
|  | 1436 |  | 
| Johan Hedberg | 550a8ca | 2015-11-27 11:11:52 +0200 | [diff] [blame] | 1437 | hci_req_run(&req, NULL); | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1438 |  | 
|  | 1439 | unlock: | 
|  | 1440 | hci_dev_unlock(hdev); | 
|  | 1441 | } | 
|  | 1442 |  | 
| Jaganath Kanakkassery | 45b7749 | 2018-07-19 17:09:43 +0530 | [diff] [blame^] | 1443 | void __hci_req_clear_ext_adv_sets(struct hci_request *req) | 
|  | 1444 | { | 
|  | 1445 | hci_req_add(req, HCI_OP_LE_CLEAR_ADV_SETS, 0, NULL); | 
|  | 1446 | } | 
|  | 1447 |  | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 1448 | int __hci_req_setup_ext_adv_instance(struct hci_request *req, u8 instance) | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 1449 | { | 
|  | 1450 | struct hci_cp_le_set_ext_adv_params cp; | 
|  | 1451 | struct hci_dev *hdev = req->hdev; | 
|  | 1452 | bool connectable; | 
|  | 1453 | u32 flags; | 
|  | 1454 | /* In the ext adv set param command the interval is 3 octets */ | 
|  | 1455 | const u8 adv_interval[3] = { 0x00, 0x08, 0x00 }; | 
|  | 1456 |  | 
|  | 1457 | flags = get_adv_instance_flags(hdev, instance); | 
|  | 1458 |  | 
|  | 1459 | /* If the "connectable" instance flag was not set, then choose between | 
|  | 1460 | * ADV_IND and ADV_NONCONN_IND based on the global connectable setting. | 
|  | 1461 | */ | 
|  | 1462 | connectable = (flags & MGMT_ADV_FLAG_CONNECTABLE) || | 
|  | 1463 | mgmt_get_connectable(hdev); | 
|  | 1464 |  | 
|  | 1465 | if (!is_advertising_allowed(hdev, connectable)) | 
|  | 1466 | return -EPERM; | 
|  | 1467 |  | 
|  | 1468 | memset(&cp, 0, sizeof(cp)); | 
|  | 1469 |  | 
|  | 1470 | memcpy(cp.min_interval, adv_interval, sizeof(cp.min_interval)); | 
|  | 1471 | memcpy(cp.max_interval, adv_interval, sizeof(cp.max_interval)); | 
|  | 1472 |  | 
|  | 1473 | if (connectable) | 
|  | 1474 | cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_IND); | 
|  | 1475 | else if (get_adv_instance_scan_rsp_len(hdev, instance)) | 
|  | 1476 | cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_SCAN_IND); | 
|  | 1477 | else | 
|  | 1478 | cp.evt_properties = cpu_to_le16(LE_LEGACY_NONCONN_IND); | 
|  | 1479 |  | 
|  | 1480 | cp.own_addr_type = BDADDR_LE_PUBLIC; | 
|  | 1481 | cp.channel_map = hdev->le_adv_channel_map; | 
|  | 1482 | cp.tx_power = 127; | 
|  | 1483 | cp.primary_phy = HCI_ADV_PHY_1M; | 
|  | 1484 | cp.secondary_phy = HCI_ADV_PHY_1M; | 
|  | 1485 | cp.handle = 0; | 
|  | 1486 |  | 
|  | 1487 | hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_PARAMS, sizeof(cp), &cp); | 
|  | 1488 |  | 
|  | 1489 | return 0; | 
|  | 1490 | } | 
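The interval { 0x00, 0x08, 0x00 } above is a little-endian 24-bit value, 0x000800 = 2048 units, and LE advertising intervals tick in 0.625 ms units, so this parameter set advertises every 1280 ms. A quick decode sketch:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t adv_interval[3] = { 0x00, 0x08, 0x00 };

        /* Little-endian 24-bit value: 0x000800 = 2048 units */
        uint32_t units = adv_interval[0] | (adv_interval[1] << 8) |
                         ((uint32_t)adv_interval[2] << 16);

        /* Each unit is 0.625 ms, so 2048 units = 1280 ms */
        printf("%u units = %.3f ms\n", units, units * 0.625);
        return 0;
    }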
|  | 1491 |  | 
|  | 1492 | void __hci_req_enable_ext_advertising(struct hci_request *req) | 
|  | 1493 | { | 
|  | 1494 | struct hci_cp_le_set_ext_adv_enable *cp; | 
|  | 1495 | struct hci_cp_ext_adv_set *adv_set; | 
|  | 1496 | u8 data[sizeof(*cp) + sizeof(*adv_set) * 1]; | 
|  | 1497 |  | 
|  | 1498 | cp = (void *) data; | 
|  | 1499 | adv_set = (void *) cp->data; | 
|  | 1500 |  | 
|  | 1501 | memset(cp, 0, sizeof(*cp)); | 
|  | 1502 |  | 
|  | 1503 | cp->enable = 0x01; | 
|  | 1504 | cp->num_of_sets = 0x01; | 
|  | 1505 |  | 
|  | 1506 | memset(adv_set, 0, sizeof(*adv_set)); | 
|  | 1507 |  | 
|  | 1508 | adv_set->handle = 0; | 
|  | 1509 |  | 
|  | 1510 | hci_req_add(req, HCI_OP_LE_SET_EXT_ADV_ENABLE, | 
|  | 1511 | sizeof(*cp) + sizeof(*adv_set) * cp->num_of_sets, | 
|  | 1512 | data); | 
|  | 1513 | } | 
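The command payload above is a fixed header followed by one set entry per advertising set being enabled, hence the sizeof(*cp) + sizeof(*adv_set) * 1 buffer. A hedged userspace mirror of that layout; the struct names and field sets here are illustrative stand-ins rather than the kernel definitions:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Illustrative stand-ins for the kernel's command structs */
    struct set_ext_adv_enable {
        uint8_t enable;
        uint8_t num_of_sets;
        uint8_t data[];         /* adv_set entries follow the header */
    } __attribute__((packed));

    struct ext_adv_set {
        uint8_t handle;
        uint16_t duration;
        uint8_t max_events;
    } __attribute__((packed));

    int main(void)
    {
        uint8_t data[sizeof(struct set_ext_adv_enable) +
                     sizeof(struct ext_adv_set) * 1];
        struct set_ext_adv_enable *cp = (void *)data;
        struct ext_adv_set *adv_set = (void *)cp->data;

        memset(data, 0, sizeof(data));
        cp->enable = 0x01;
        cp->num_of_sets = 0x01;
        adv_set->handle = 0;    /* single set, handle 0 */

        printf("command length: %zu bytes\n", sizeof(data));
        return 0;
    }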
|  | 1514 |  | 
|  | 1515 | int __hci_req_start_ext_adv(struct hci_request *req, u8 instance) | 
|  | 1516 | { | 
| Jaganath Kanakkassery | 45b7749 | 2018-07-19 17:09:43 +0530 | [diff] [blame^] | 1517 | struct hci_dev *hdev = req->hdev; | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 1518 | int err; | 
|  | 1519 |  | 
| Jaganath Kanakkassery | 45b7749 | 2018-07-19 17:09:43 +0530 | [diff] [blame^] | 1520 | if (hci_dev_test_flag(hdev, HCI_LE_ADV)) | 
|  | 1521 | __hci_req_disable_advertising(req); | 
|  | 1522 |  | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 1523 | err = __hci_req_setup_ext_adv_instance(req, instance); | 
|  | 1524 | if (err < 0) | 
|  | 1525 | return err; | 
|  | 1526 |  | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 1527 | __hci_req_update_scan_rsp_data(req, instance); | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 1528 | __hci_req_enable_ext_advertising(req); | 
|  | 1529 |  | 
|  | 1530 | return 0; | 
|  | 1531 | } | 
|  | 1532 |  | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1533 | int __hci_req_schedule_adv_instance(struct hci_request *req, u8 instance, | 
|  | 1534 | bool force) | 
|  | 1535 | { | 
|  | 1536 | struct hci_dev *hdev = req->hdev; | 
|  | 1537 | struct adv_info *adv_instance = NULL; | 
|  | 1538 | u16 timeout; | 
|  | 1539 |  | 
|  | 1540 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || | 
| Johan Hedberg | 17fd08f | 2015-11-26 12:15:59 +0200 | [diff] [blame] | 1541 | list_empty(&hdev->adv_instances)) | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1542 | return -EPERM; | 
|  | 1543 |  | 
|  | 1544 | if (hdev->adv_instance_timeout) | 
|  | 1545 | return -EBUSY; | 
|  | 1546 |  | 
|  | 1547 | adv_instance = hci_find_adv_instance(hdev, instance); | 
|  | 1548 | if (!adv_instance) | 
|  | 1549 | return -ENOENT; | 
|  | 1550 |  | 
|  | 1551 | /* A zero timeout means unlimited advertising. As long as there is | 
|  | 1552 | * only one instance, duration should be ignored. We still set a timeout | 
|  | 1553 | * in case further instances are being added later on. | 
|  | 1554 | * | 
|  | 1555 | * If the remaining lifetime of the instance is more than the duration | 
|  | 1556 | * then the timeout corresponds to the duration, otherwise it will be | 
|  | 1557 | * reduced to the remaining instance lifetime. | 
|  | 1558 | */ | 
|  | 1559 | if (adv_instance->timeout == 0 || | 
|  | 1560 | adv_instance->duration <= adv_instance->remaining_time) | 
|  | 1561 | timeout = adv_instance->duration; | 
|  | 1562 | else | 
|  | 1563 | timeout = adv_instance->remaining_time; | 
|  | 1564 |  | 
|  | 1565 | /* The remaining time is being reduced unless the instance is being | 
|  | 1566 | * advertised without time limit. | 
|  | 1567 | */ | 
|  | 1568 | if (adv_instance->timeout) | 
|  | 1569 | adv_instance->remaining_time = | 
|  | 1570 | adv_instance->remaining_time - timeout; | 
|  | 1571 |  | 
|  | 1572 | hdev->adv_instance_timeout = timeout; | 
|  | 1573 | queue_delayed_work(hdev->req_workqueue, | 
|  | 1574 | &hdev->adv_instance_expire, | 
|  | 1575 | msecs_to_jiffies(timeout * 1000)); | 
|  | 1576 |  | 
|  | 1577 | /* If we're just re-scheduling the same instance again then do not | 
|  | 1578 | * execute any HCI commands. This happens when a single instance is | 
|  | 1579 | * being advertised. | 
|  | 1580 | */ | 
|  | 1581 | if (!force && hdev->cur_adv_instance == instance && | 
|  | 1582 | hci_dev_test_flag(hdev, HCI_LE_ADV)) | 
|  | 1583 | return 0; | 
|  | 1584 |  | 
|  | 1585 | hdev->cur_adv_instance = instance; | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 1586 | if (ext_adv_capable(hdev)) { | 
|  | 1587 | __hci_req_start_ext_adv(req, instance); | 
|  | 1588 | } else { | 
|  | 1589 | __hci_req_update_adv_data(req, instance); | 
|  | 1590 | __hci_req_update_scan_rsp_data(req, instance); | 
|  | 1591 | __hci_req_enable_advertising(req); | 
|  | 1592 | } | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1593 |  | 
|  | 1594 | return 0; | 
|  | 1595 | } | 
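The timeout selection above reduces to: use the instance duration unless the instance has a finite lifetime that is shorter. A compact sketch of the same branch, with assumed example values in seconds:

    #include <stdint.h>
    #include <stdio.h>

    /* Mirrors the branch in __hci_req_schedule_adv_instance() */
    static uint16_t pick_timeout(uint16_t timeout, uint16_t duration,
                                 uint16_t remaining_time)
    {
        if (timeout == 0 || duration <= remaining_time)
            return duration;
        return remaining_time;
    }

    int main(void)
    {
        /* finite instance: 10 s per round but only 4 s of life left */
        printf("%u\n", pick_timeout(60, 10, 4));    /* -> 4 */
        /* unlimited instance: duration is used as the rescheduling tick */
        printf("%u\n", pick_timeout(0, 10, 4));     /* -> 10 */
        return 0;
    }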
|  | 1596 |  | 
|  | 1597 | static void cancel_adv_timeout(struct hci_dev *hdev) | 
|  | 1598 | { | 
|  | 1599 | if (hdev->adv_instance_timeout) { | 
|  | 1600 | hdev->adv_instance_timeout = 0; | 
|  | 1601 | cancel_delayed_work(&hdev->adv_instance_expire); | 
|  | 1602 | } | 
|  | 1603 | } | 
|  | 1604 |  | 
|  | 1605 | /* For a single instance: | 
|  | 1606 | * - force == true: The instance will be removed even when its remaining | 
|  | 1607 | *   lifetime is not zero. | 
|  | 1608 | * - force == false: The instance will be deactivated but kept stored unless | 
|  | 1609 | *   the remaining lifetime is zero. | 
|  | 1610 | * | 
|  | 1611 | * For instance == 0x00: | 
|  | 1612 | * - force == true: All instances will be removed regardless of their timeout | 
|  | 1613 | *   setting. | 
|  | 1614 | * - force == false: Only instances that have a timeout will be removed. | 
|  | 1615 | */ | 
| Johan Hedberg | 37d3a1f | 2016-08-28 20:53:34 +0300 | [diff] [blame] | 1616 | void hci_req_clear_adv_instance(struct hci_dev *hdev, struct sock *sk, | 
|  | 1617 | struct hci_request *req, u8 instance, | 
|  | 1618 | bool force) | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1619 | { | 
|  | 1620 | struct adv_info *adv_instance, *n, *next_instance = NULL; | 
|  | 1621 | int err; | 
|  | 1622 | u8 rem_inst; | 
|  | 1623 |  | 
|  | 1624 | /* Cancel any timeout concerning the removed instance(s). */ | 
|  | 1625 | if (!instance || hdev->cur_adv_instance == instance) | 
|  | 1626 | cancel_adv_timeout(hdev); | 
|  | 1627 |  | 
|  | 1628 | /* Get the next instance to advertise BEFORE we remove | 
|  | 1629 | * the current one. This can be the same instance again | 
|  | 1630 | * if there is only one instance. | 
|  | 1631 | */ | 
|  | 1632 | if (instance && hdev->cur_adv_instance == instance) | 
|  | 1633 | next_instance = hci_get_next_instance(hdev, instance); | 
|  | 1634 |  | 
|  | 1635 | if (instance == 0x00) { | 
|  | 1636 | list_for_each_entry_safe(adv_instance, n, &hdev->adv_instances, | 
|  | 1637 | list) { | 
|  | 1638 | if (!(force || adv_instance->timeout)) | 
|  | 1639 | continue; | 
|  | 1640 |  | 
|  | 1641 | rem_inst = adv_instance->instance; | 
|  | 1642 | err = hci_remove_adv_instance(hdev, rem_inst); | 
|  | 1643 | if (!err) | 
| Johan Hedberg | 37d3a1f | 2016-08-28 20:53:34 +0300 | [diff] [blame] | 1644 | mgmt_advertising_removed(sk, hdev, rem_inst); | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1645 | } | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1646 | } else { | 
|  | 1647 | adv_instance = hci_find_adv_instance(hdev, instance); | 
|  | 1648 |  | 
|  | 1649 | if (force || (adv_instance && adv_instance->timeout && | 
|  | 1650 | !adv_instance->remaining_time)) { | 
|  | 1651 | /* Don't advertise a removed instance. */ | 
|  | 1652 | if (next_instance && | 
|  | 1653 | next_instance->instance == instance) | 
|  | 1654 | next_instance = NULL; | 
|  | 1655 |  | 
|  | 1656 | err = hci_remove_adv_instance(hdev, instance); | 
|  | 1657 | if (!err) | 
| Johan Hedberg | 37d3a1f | 2016-08-28 20:53:34 +0300 | [diff] [blame] | 1658 | mgmt_advertising_removed(sk, hdev, instance); | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1659 | } | 
|  | 1660 | } | 
|  | 1661 |  | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 1662 | if (!req || !hdev_is_powered(hdev) || | 
|  | 1663 | hci_dev_test_flag(hdev, HCI_ADVERTISING)) | 
|  | 1664 | return; | 
|  | 1665 |  | 
|  | 1666 | if (next_instance) | 
|  | 1667 | __hci_req_schedule_adv_instance(req, next_instance->instance, | 
|  | 1668 | false); | 
|  | 1669 | } | 
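For a specific instance, the removal predicate above is force || (timeout && !remaining_time); for instance 0x00 the remaining_time check is dropped and every instance with a timeout goes. A small sketch of the single-instance predicate with assumed inputs:

    #include <stdbool.h>
    #include <stdio.h>

    /* Mirrors the single-instance removal test in
     * hci_req_clear_adv_instance() */
    static bool should_remove(bool force, unsigned int timeout,
                              unsigned int remaining_time)
    {
        return force || (timeout && !remaining_time);
    }

    int main(void)
    {
        /* expired finite instance: removed even without force */
        printf("%d\n", should_remove(false, 5, 0));  /* -> 1 */
        /* unlimited instance: only force removes it */
        printf("%d\n", should_remove(false, 0, 0));  /* -> 0 */
        printf("%d\n", should_remove(true, 0, 0));   /* -> 1 */
        return 0;
    }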
|  | 1670 |  | 
| Johan Hedberg | 0857dd3 | 2014-12-19 13:40:20 +0200 | [diff] [blame] | 1671 | static void set_random_addr(struct hci_request *req, bdaddr_t *rpa) | 
|  | 1672 | { | 
|  | 1673 | struct hci_dev *hdev = req->hdev; | 
|  | 1674 |  | 
|  | 1675 | /* If we're advertising or initiating an LE connection we can't | 
|  | 1676 | * go ahead and change the random address at this time. This is | 
|  | 1677 | * because the eventual initiator address used for the | 
|  | 1678 | * subsequently created connection will be undefined (some | 
|  | 1679 | * controllers use the new address and others the one we had | 
|  | 1680 | * when the operation started). | 
|  | 1681 | * | 
|  | 1682 | * In this kind of scenario skip the update and let the random | 
|  | 1683 | * address be updated at the next cycle. | 
|  | 1684 | */ | 
| Marcel Holtmann | d7a5a11 | 2015-03-13 02:11:00 -0700 | [diff] [blame] | 1685 | if (hci_dev_test_flag(hdev, HCI_LE_ADV) || | 
| Jakub Pawlowski | e7d9ab7 | 2015-08-07 20:22:52 +0200 | [diff] [blame] | 1686 | hci_lookup_le_connect(hdev)) { | 
| Johan Hedberg | 0857dd3 | 2014-12-19 13:40:20 +0200 | [diff] [blame] | 1687 | BT_DBG("Deferring random address update"); | 
| Marcel Holtmann | a1536da | 2015-03-13 02:11:01 -0700 | [diff] [blame] | 1688 | hci_dev_set_flag(hdev, HCI_RPA_EXPIRED); | 
| Johan Hedberg | 0857dd3 | 2014-12-19 13:40:20 +0200 | [diff] [blame] | 1689 | return; | 
|  | 1690 | } | 
|  | 1691 |  | 
|  | 1692 | hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, rpa); | 
|  | 1693 | } | 
|  | 1694 |  | 
|  | 1695 | int hci_update_random_address(struct hci_request *req, bool require_privacy, | 
| Johan Hedberg | 82a37ad | 2016-03-09 17:30:34 +0200 | [diff] [blame] | 1696 | bool use_rpa, u8 *own_addr_type) | 
| Johan Hedberg | 0857dd3 | 2014-12-19 13:40:20 +0200 | [diff] [blame] | 1697 | { | 
|  | 1698 | struct hci_dev *hdev = req->hdev; | 
|  | 1699 | int err; | 
|  | 1700 |  | 
|  | 1701 | /* If privacy is enabled use a resolvable private address. If | 
|  | 1702 | * current RPA has expired or there is something else than | 
|  | 1703 | * the current RPA in use, then generate a new one. | 
|  | 1704 | */ | 
| Johan Hedberg | 82a37ad | 2016-03-09 17:30:34 +0200 | [diff] [blame] | 1705 | if (use_rpa) { | 
| Johan Hedberg | 0857dd3 | 2014-12-19 13:40:20 +0200 | [diff] [blame] | 1706 | int to; | 
|  | 1707 |  | 
|  | 1708 | *own_addr_type = ADDR_LE_DEV_RANDOM; | 
|  | 1709 |  | 
| Marcel Holtmann | a69d892 | 2015-03-13 02:11:05 -0700 | [diff] [blame] | 1710 | if (!hci_dev_test_and_clear_flag(hdev, HCI_RPA_EXPIRED) && | 
| Johan Hedberg | 0857dd3 | 2014-12-19 13:40:20 +0200 | [diff] [blame] | 1711 | !bacmp(&hdev->random_addr, &hdev->rpa)) | 
|  | 1712 | return 0; | 
|  | 1713 |  | 
|  | 1714 | err = smp_generate_rpa(hdev, hdev->irk, &hdev->rpa); | 
|  | 1715 | if (err < 0) { | 
| Marcel Holtmann | 2064ee3 | 2017-10-30 10:42:59 +0100 | [diff] [blame] | 1716 | bt_dev_err(hdev, "failed to generate new RPA"); | 
| Johan Hedberg | 0857dd3 | 2014-12-19 13:40:20 +0200 | [diff] [blame] | 1717 | return err; | 
|  | 1718 | } | 
|  | 1719 |  | 
|  | 1720 | set_random_addr(req, &hdev->rpa); | 
|  | 1721 |  | 
|  | 1722 | to = msecs_to_jiffies(hdev->rpa_timeout * 1000); | 
|  | 1723 | queue_delayed_work(hdev->workqueue, &hdev->rpa_expired, to); | 
|  | 1724 |  | 
|  | 1725 | return 0; | 
|  | 1726 | } | 
|  | 1727 |  | 
|  | 1728 | /* In case of required privacy without resolvable private address, | 
|  | 1729 | * use a non-resolvable private address. This is useful for active | 
|  | 1730 | * scanning and non-connectable advertising. | 
|  | 1731 | */ | 
|  | 1732 | if (require_privacy) { | 
|  | 1733 | bdaddr_t nrpa; | 
|  | 1734 |  | 
|  | 1735 | while (true) { | 
|  | 1736 | /* The non-resolvable private address is generated | 
|  | 1737 | * from six random bytes with the two most significant | 
|  | 1738 | * bits cleared. | 
|  | 1739 | */ | 
|  | 1740 | get_random_bytes(&nrpa, 6); | 
|  | 1741 | nrpa.b[5] &= 0x3f; | 
|  | 1742 |  | 
|  | 1743 | /* The non-resolvable private address shall not be | 
|  | 1744 | * equal to the public address. | 
|  | 1745 | */ | 
|  | 1746 | if (bacmp(&hdev->bdaddr, &nrpa)) | 
|  | 1747 | break; | 
|  | 1748 | } | 
|  | 1749 |  | 
|  | 1750 | *own_addr_type = ADDR_LE_DEV_RANDOM; | 
|  | 1751 | set_random_addr(req, &nrpa); | 
|  | 1752 | return 0; | 
|  | 1753 | } | 
|  | 1754 |  | 
|  | 1755 | /* If forcing static address is in use or there is no public | 
|  | 1756 | * address, use the static address as the random address (but skip | 
|  | 1757 | * the HCI command if the current random address is already the | 
|  | 1758 | * static one). | 
| Marcel Holtmann | 50b5b95 | 2014-12-19 23:05:35 +0100 | [diff] [blame] | 1759 | * | 
|  | 1760 | * In case BR/EDR has been disabled on a dual-mode controller | 
|  | 1761 | * and a static address has been configured, then use that | 
|  | 1762 | * address instead of the public BR/EDR address. | 
| Johan Hedberg | 0857dd3 | 2014-12-19 13:40:20 +0200 | [diff] [blame] | 1763 | */ | 
| Marcel Holtmann | b7cb93e | 2015-03-13 10:20:35 -0700 | [diff] [blame] | 1764 | if (hci_dev_test_flag(hdev, HCI_FORCE_STATIC_ADDR) || | 
| Marcel Holtmann | 50b5b95 | 2014-12-19 23:05:35 +0100 | [diff] [blame] | 1765 | !bacmp(&hdev->bdaddr, BDADDR_ANY) || | 
| Marcel Holtmann | d7a5a11 | 2015-03-13 02:11:00 -0700 | [diff] [blame] | 1766 | (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED) && | 
| Marcel Holtmann | 50b5b95 | 2014-12-19 23:05:35 +0100 | [diff] [blame] | 1767 | bacmp(&hdev->static_addr, BDADDR_ANY))) { | 
| Johan Hedberg | 0857dd3 | 2014-12-19 13:40:20 +0200 | [diff] [blame] | 1768 | *own_addr_type = ADDR_LE_DEV_RANDOM; | 
|  | 1769 | if (bacmp(&hdev->static_addr, &hdev->random_addr)) | 
|  | 1770 | hci_req_add(req, HCI_OP_LE_SET_RANDOM_ADDR, 6, | 
|  | 1771 | &hdev->static_addr); | 
|  | 1772 | return 0; | 
|  | 1773 | } | 
|  | 1774 |  | 
|  | 1775 | /* Neither privacy nor static address is being used so use a | 
|  | 1776 | * public address. | 
|  | 1777 | */ | 
|  | 1778 | *own_addr_type = ADDR_LE_DEV_PUBLIC; | 
|  | 1779 |  | 
|  | 1780 | return 0; | 
|  | 1781 | } | 
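A non-resolvable private address is just six random bytes with the two most significant bits of the top octet cleared, regenerated if it collides with the public address. A userspace sketch of the same loop, assuming the kernel's little-endian bdaddr layout where b[5] is the most significant octet; rand() stands in for get_random_bytes() purely for illustration:

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <time.h>

    typedef struct { uint8_t b[6]; } bdaddr_t;  /* b[5] is the MSB */

    static void gen_nrpa(bdaddr_t *nrpa, const bdaddr_t *public_addr)
    {
        do {
            for (int i = 0; i < 6; i++)
                nrpa->b[i] = rand() & 0xff; /* kernel: get_random_bytes() */
            /* Clear the two most significant bits: NRPA marker */
            nrpa->b[5] &= 0x3f;
            /* Must not collide with the public address */
        } while (memcmp(nrpa, public_addr, sizeof(*nrpa)) == 0);
    }

    int main(void)
    {
        bdaddr_t pub = { { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06 } };
        bdaddr_t nrpa;

        srand(time(NULL));
        gen_nrpa(&nrpa, &pub);
        for (int i = 5; i >= 0; i--)
            printf("%02x%s", nrpa.b[i], i ? ":" : "\n");
        return 0;
    }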
| Johan Hedberg | 2cf2221 | 2014-12-19 22:26:00 +0200 | [diff] [blame] | 1782 |  | 
| Johan Hedberg | 405a261 | 2014-12-19 23:18:22 +0200 | [diff] [blame] | 1783 | static bool disconnected_whitelist_entries(struct hci_dev *hdev) | 
|  | 1784 | { | 
|  | 1785 | struct bdaddr_list *b; | 
|  | 1786 |  | 
|  | 1787 | list_for_each_entry(b, &hdev->whitelist, list) { | 
|  | 1788 | struct hci_conn *conn; | 
|  | 1789 |  | 
|  | 1790 | conn = hci_conn_hash_lookup_ba(hdev, ACL_LINK, &b->bdaddr); | 
|  | 1791 | if (!conn) | 
|  | 1792 | return true; | 
|  | 1793 |  | 
|  | 1794 | if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG) | 
|  | 1795 | return true; | 
|  | 1796 | } | 
|  | 1797 |  | 
|  | 1798 | return false; | 
|  | 1799 | } | 
|  | 1800 |  | 
| Johan Hedberg | 01b1cb8 | 2015-11-16 12:52:21 +0200 | [diff] [blame] | 1801 | void __hci_req_update_scan(struct hci_request *req) | 
| Johan Hedberg | 405a261 | 2014-12-19 23:18:22 +0200 | [diff] [blame] | 1802 | { | 
|  | 1803 | struct hci_dev *hdev = req->hdev; | 
|  | 1804 | u8 scan; | 
|  | 1805 |  | 
| Marcel Holtmann | d7a5a11 | 2015-03-13 02:11:00 -0700 | [diff] [blame] | 1806 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) | 
| Johan Hedberg | 405a261 | 2014-12-19 23:18:22 +0200 | [diff] [blame] | 1807 | return; | 
|  | 1808 |  | 
|  | 1809 | if (!hdev_is_powered(hdev)) | 
|  | 1810 | return; | 
|  | 1811 |  | 
|  | 1812 | if (mgmt_powering_down(hdev)) | 
|  | 1813 | return; | 
|  | 1814 |  | 
| Marcel Holtmann | d7a5a11 | 2015-03-13 02:11:00 -0700 | [diff] [blame] | 1815 | if (hci_dev_test_flag(hdev, HCI_CONNECTABLE) || | 
| Johan Hedberg | 405a261 | 2014-12-19 23:18:22 +0200 | [diff] [blame] | 1816 | disconnected_whitelist_entries(hdev)) | 
|  | 1817 | scan = SCAN_PAGE; | 
|  | 1818 | else | 
|  | 1819 | scan = SCAN_DISABLED; | 
|  | 1820 |  | 
| Marcel Holtmann | d7a5a11 | 2015-03-13 02:11:00 -0700 | [diff] [blame] | 1821 | if (hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) | 
| Johan Hedberg | 405a261 | 2014-12-19 23:18:22 +0200 | [diff] [blame] | 1822 | scan |= SCAN_INQUIRY; | 
|  | 1823 |  | 
| Johan Hedberg | 01b1cb8 | 2015-11-16 12:52:21 +0200 | [diff] [blame] | 1824 | if (test_bit(HCI_PSCAN, &hdev->flags) == !!(scan & SCAN_PAGE) && | 
|  | 1825 | test_bit(HCI_ISCAN, &hdev->flags) == !!(scan & SCAN_INQUIRY)) | 
|  | 1826 | return; | 
|  | 1827 |  | 
| Johan Hedberg | 405a261 | 2014-12-19 23:18:22 +0200 | [diff] [blame] | 1828 | hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan); | 
|  | 1829 | } | 
|  | 1830 |  | 
| Johan Hedberg | 01b1cb8 | 2015-11-16 12:52:21 +0200 | [diff] [blame] | 1831 | static int update_scan(struct hci_request *req, unsigned long opt) | 
| Johan Hedberg | 405a261 | 2014-12-19 23:18:22 +0200 | [diff] [blame] | 1832 | { | 
| Johan Hedberg | 01b1cb8 | 2015-11-16 12:52:21 +0200 | [diff] [blame] | 1833 | hci_dev_lock(req->hdev); | 
|  | 1834 | __hci_req_update_scan(req); | 
|  | 1835 | hci_dev_unlock(req->hdev); | 
|  | 1836 | return 0; | 
|  | 1837 | } | 
| Johan Hedberg | 405a261 | 2014-12-19 23:18:22 +0200 | [diff] [blame] | 1838 |  | 
| Johan Hedberg | 01b1cb8 | 2015-11-16 12:52:21 +0200 | [diff] [blame] | 1839 | static void scan_update_work(struct work_struct *work) | 
|  | 1840 | { | 
|  | 1841 | struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update); | 
|  | 1842 |  | 
|  | 1843 | hci_req_sync(hdev, update_scan, 0, HCI_CMD_TIMEOUT, NULL); | 
| Johan Hedberg | 405a261 | 2014-12-19 23:18:22 +0200 | [diff] [blame] | 1844 | } | 
|  | 1845 |  | 
| Johan Hedberg | 53c0ba7 | 2015-11-22 16:43:43 +0300 | [diff] [blame] | 1846 | static int connectable_update(struct hci_request *req, unsigned long opt) | 
|  | 1847 | { | 
|  | 1848 | struct hci_dev *hdev = req->hdev; | 
|  | 1849 |  | 
|  | 1850 | hci_dev_lock(hdev); | 
|  | 1851 |  | 
|  | 1852 | __hci_req_update_scan(req); | 
|  | 1853 |  | 
|  | 1854 | /* If BR/EDR is not enabled and we disable advertising as a | 
|  | 1855 | * by-product of disabling connectable, we need to update the | 
|  | 1856 | * advertising flags. | 
|  | 1857 | */ | 
|  | 1858 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) | 
| Johan Hedberg | cab054a | 2015-11-30 11:21:45 +0200 | [diff] [blame] | 1859 | __hci_req_update_adv_data(req, hdev->cur_adv_instance); | 
| Johan Hedberg | 53c0ba7 | 2015-11-22 16:43:43 +0300 | [diff] [blame] | 1860 |  | 
|  | 1861 | /* Update the advertising parameters if necessary */ | 
|  | 1862 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 1863 | !list_empty(&hdev->adv_instances)) { | 
|  | 1864 | if (ext_adv_capable(hdev)) | 
|  | 1865 | __hci_req_start_ext_adv(req, hdev->cur_adv_instance); | 
|  | 1866 | else | 
|  | 1867 | __hci_req_enable_advertising(req); | 
|  | 1868 | } | 
| Johan Hedberg | 53c0ba7 | 2015-11-22 16:43:43 +0300 | [diff] [blame] | 1869 |  | 
|  | 1870 | __hci_update_background_scan(req); | 
|  | 1871 |  | 
|  | 1872 | hci_dev_unlock(hdev); | 
|  | 1873 |  | 
|  | 1874 | return 0; | 
|  | 1875 | } | 
|  | 1876 |  | 
|  | 1877 | static void connectable_update_work(struct work_struct *work) | 
|  | 1878 | { | 
|  | 1879 | struct hci_dev *hdev = container_of(work, struct hci_dev, | 
|  | 1880 | connectable_update); | 
|  | 1881 | u8 status; | 
|  | 1882 |  | 
|  | 1883 | hci_req_sync(hdev, connectable_update, 0, HCI_CMD_TIMEOUT, &status); | 
|  | 1884 | mgmt_set_connectable_complete(hdev, status); | 
|  | 1885 | } | 
|  | 1886 |  | 
| Johan Hedberg | 14bf5ea | 2015-11-22 19:00:22 +0200 | [diff] [blame] | 1887 | static u8 get_service_classes(struct hci_dev *hdev) | 
|  | 1888 | { | 
|  | 1889 | struct bt_uuid *uuid; | 
|  | 1890 | u8 val = 0; | 
|  | 1891 |  | 
|  | 1892 | list_for_each_entry(uuid, &hdev->uuids, list) | 
|  | 1893 | val |= uuid->svc_hint; | 
|  | 1894 |  | 
|  | 1895 | return val; | 
|  | 1896 | } | 
|  | 1897 |  | 
|  | 1898 | void __hci_req_update_class(struct hci_request *req) | 
|  | 1899 | { | 
|  | 1900 | struct hci_dev *hdev = req->hdev; | 
|  | 1901 | u8 cod[3]; | 
|  | 1902 |  | 
|  | 1903 | BT_DBG("%s", hdev->name); | 
|  | 1904 |  | 
|  | 1905 | if (!hdev_is_powered(hdev)) | 
|  | 1906 | return; | 
|  | 1907 |  | 
|  | 1908 | if (!hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) | 
|  | 1909 | return; | 
|  | 1910 |  | 
|  | 1911 | if (hci_dev_test_flag(hdev, HCI_SERVICE_CACHE)) | 
|  | 1912 | return; | 
|  | 1913 |  | 
|  | 1914 | cod[0] = hdev->minor_class; | 
|  | 1915 | cod[1] = hdev->major_class; | 
|  | 1916 | cod[2] = get_service_classes(hdev); | 
|  | 1917 |  | 
|  | 1918 | if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) | 
|  | 1919 | cod[1] |= 0x20; | 
|  | 1920 |  | 
|  | 1921 | if (memcmp(cod, hdev->dev_class, 3) == 0) | 
|  | 1922 | return; | 
|  | 1923 |  | 
|  | 1924 | hci_req_add(req, HCI_OP_WRITE_CLASS_OF_DEV, sizeof(cod), cod); | 
|  | 1925 | } | 
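The Class of Device above is three octets: cod[0] is the minor class, cod[1] the major class (bit 0x20 in this octet is the "limited discoverable" service bit), and cod[2] the ORed service-class hints. A quick sketch with assumed example values:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint8_t cod[3];
        uint8_t minor_class = 0x0c; /* example: laptop */
        uint8_t major_class = 0x01; /* example: computer */
        uint8_t svc_classes = 0x40; /* example: telephony hint */
        int limited_discoverable = 1;

        cod[0] = minor_class;
        cod[1] = major_class;
        cod[2] = svc_classes;

        if (limited_discoverable)
            cod[1] |= 0x20; /* limited discoverable mode bit */

        printf("CoD: %02x %02x %02x\n", cod[0], cod[1], cod[2]);
        return 0;
    }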
|  | 1926 |  | 
| Johan Hedberg | aed1a88 | 2015-11-22 17:24:44 +0300 | [diff] [blame] | 1927 | static void write_iac(struct hci_request *req) | 
|  | 1928 | { | 
|  | 1929 | struct hci_dev *hdev = req->hdev; | 
|  | 1930 | struct hci_cp_write_current_iac_lap cp; | 
|  | 1931 |  | 
|  | 1932 | if (!hci_dev_test_flag(hdev, HCI_DISCOVERABLE)) | 
|  | 1933 | return; | 
|  | 1934 |  | 
|  | 1935 | if (hci_dev_test_flag(hdev, HCI_LIMITED_DISCOVERABLE)) { | 
|  | 1936 | /* Limited discoverable mode */ | 
|  | 1937 | cp.num_iac = min_t(u8, hdev->num_iac, 2); | 
|  | 1938 | cp.iac_lap[0] = 0x00;	/* LIAC */ | 
|  | 1939 | cp.iac_lap[1] = 0x8b; | 
|  | 1940 | cp.iac_lap[2] = 0x9e; | 
|  | 1941 | cp.iac_lap[3] = 0x33;	/* GIAC */ | 
|  | 1942 | cp.iac_lap[4] = 0x8b; | 
|  | 1943 | cp.iac_lap[5] = 0x9e; | 
|  | 1944 | } else { | 
|  | 1945 | /* General discoverable mode */ | 
|  | 1946 | cp.num_iac = 1; | 
|  | 1947 | cp.iac_lap[0] = 0x33;	/* GIAC */ | 
|  | 1948 | cp.iac_lap[1] = 0x8b; | 
|  | 1949 | cp.iac_lap[2] = 0x9e; | 
|  | 1950 | } | 
|  | 1951 |  | 
|  | 1952 | hci_req_add(req, HCI_OP_WRITE_CURRENT_IAC_LAP, | 
|  | 1953 | (cp.num_iac * 3) + 1, &cp); | 
|  | 1954 | } | 
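The IAC LAPs are written little-endian: GIAC 0x9E8B33 goes on the wire as 33 8b 9e and LIAC 0x9E8B00 as 00 8b 9e, which is exactly what the byte assignments above spell out. A sketch of the round trip:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t giac[3] = { 0x33, 0x8b, 0x9e }; /* GIAC 0x9e8b33 */
        const uint8_t liac[3] = { 0x00, 0x8b, 0x9e }; /* LIAC 0x9e8b00 */
        uint32_t g = giac[0] | (giac[1] << 8) | ((uint32_t)giac[2] << 16);
        uint32_t l = liac[0] | (liac[1] << 8) | ((uint32_t)liac[2] << 16);

        printf("GIAC = 0x%06x, LIAC = 0x%06x\n", g, l);
        return 0;
    }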
|  | 1955 |  | 
|  | 1956 | static int discoverable_update(struct hci_request *req, unsigned long opt) | 
|  | 1957 | { | 
|  | 1958 | struct hci_dev *hdev = req->hdev; | 
|  | 1959 |  | 
|  | 1960 | hci_dev_lock(hdev); | 
|  | 1961 |  | 
|  | 1962 | if (hci_dev_test_flag(hdev, HCI_BREDR_ENABLED)) { | 
|  | 1963 | write_iac(req); | 
|  | 1964 | __hci_req_update_scan(req); | 
|  | 1965 | __hci_req_update_class(req); | 
|  | 1966 | } | 
|  | 1967 |  | 
|  | 1968 | /* Advertising instances don't use the global discoverable setting, so | 
|  | 1969 | * only update AD if advertising was enabled using Set Advertising. | 
|  | 1970 | */ | 
| Johan Hedberg | 82a37ad | 2016-03-09 17:30:34 +0200 | [diff] [blame] | 1971 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { | 
| Johan Hedberg | cab054a | 2015-11-30 11:21:45 +0200 | [diff] [blame] | 1972 | __hci_req_update_adv_data(req, 0x00); | 
| Johan Hedberg | aed1a88 | 2015-11-22 17:24:44 +0300 | [diff] [blame] | 1973 |  | 
| Johan Hedberg | 82a37ad | 2016-03-09 17:30:34 +0200 | [diff] [blame] | 1974 | /* Discoverable mode affects the local advertising | 
|  | 1975 | * address in limited privacy mode. | 
|  | 1976 | */ | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 1977 | if (hci_dev_test_flag(hdev, HCI_LIMITED_PRIVACY)) { | 
|  | 1978 | if (ext_adv_capable(hdev)) | 
|  | 1979 | __hci_req_start_ext_adv(req, 0x00); | 
|  | 1980 | else | 
|  | 1981 | __hci_req_enable_advertising(req); | 
|  | 1982 | } | 
| Johan Hedberg | 82a37ad | 2016-03-09 17:30:34 +0200 | [diff] [blame] | 1983 | } | 
|  | 1984 |  | 
| Johan Hedberg | aed1a88 | 2015-11-22 17:24:44 +0300 | [diff] [blame] | 1985 | hci_dev_unlock(hdev); | 
|  | 1986 |  | 
|  | 1987 | return 0; | 
|  | 1988 | } | 
|  | 1989 |  | 
|  | 1990 | static void discoverable_update_work(struct work_struct *work) | 
|  | 1991 | { | 
|  | 1992 | struct hci_dev *hdev = container_of(work, struct hci_dev, | 
|  | 1993 | discoverable_update); | 
|  | 1994 | u8 status; | 
|  | 1995 |  | 
|  | 1996 | hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, &status); | 
|  | 1997 | mgmt_set_discoverable_complete(hdev, status); | 
|  | 1998 | } | 
|  | 1999 |  | 
| Johan Hedberg | dcc0f0d | 2015-10-22 10:49:37 +0300 | [diff] [blame] | 2000 | void __hci_abort_conn(struct hci_request *req, struct hci_conn *conn, | 
|  | 2001 | u8 reason) | 
|  | 2002 | { | 
|  | 2003 | switch (conn->state) { | 
|  | 2004 | case BT_CONNECTED: | 
|  | 2005 | case BT_CONFIG: | 
|  | 2006 | if (conn->type == AMP_LINK) { | 
|  | 2007 | struct hci_cp_disconn_phy_link cp; | 
|  | 2008 |  | 
|  | 2009 | cp.phy_handle = HCI_PHY_HANDLE(conn->handle); | 
|  | 2010 | cp.reason = reason; | 
|  | 2011 | hci_req_add(req, HCI_OP_DISCONN_PHY_LINK, sizeof(cp), | 
|  | 2012 | &cp); | 
|  | 2013 | } else { | 
|  | 2014 | struct hci_cp_disconnect dc; | 
|  | 2015 |  | 
|  | 2016 | dc.handle = cpu_to_le16(conn->handle); | 
|  | 2017 | dc.reason = reason; | 
|  | 2018 | hci_req_add(req, HCI_OP_DISCONNECT, sizeof(dc), &dc); | 
|  | 2019 | } | 
|  | 2020 |  | 
|  | 2021 | conn->state = BT_DISCONN; | 
|  | 2022 |  | 
|  | 2023 | break; | 
|  | 2024 | case BT_CONNECT: | 
|  | 2025 | if (conn->type == LE_LINK) { | 
|  | 2026 | if (test_bit(HCI_CONN_SCANNING, &conn->flags)) | 
|  | 2027 | break; | 
|  | 2028 | hci_req_add(req, HCI_OP_LE_CREATE_CONN_CANCEL, | 
|  | 2029 | 0, NULL); | 
|  | 2030 | } else if (conn->type == ACL_LINK) { | 
|  | 2031 | if (req->hdev->hci_ver < BLUETOOTH_VER_1_2) | 
|  | 2032 | break; | 
|  | 2033 | hci_req_add(req, HCI_OP_CREATE_CONN_CANCEL, | 
|  | 2034 | 6, &conn->dst); | 
|  | 2035 | } | 
|  | 2036 | break; | 
|  | 2037 | case BT_CONNECT2: | 
|  | 2038 | if (conn->type == ACL_LINK) { | 
|  | 2039 | struct hci_cp_reject_conn_req rej; | 
|  | 2040 |  | 
|  | 2041 | bacpy(&rej.bdaddr, &conn->dst); | 
|  | 2042 | rej.reason = reason; | 
|  | 2043 |  | 
|  | 2044 | hci_req_add(req, HCI_OP_REJECT_CONN_REQ, | 
|  | 2045 | sizeof(rej), &rej); | 
|  | 2046 | } else if (conn->type == SCO_LINK || conn->type == ESCO_LINK) { | 
|  | 2047 | struct hci_cp_reject_sync_conn_req rej; | 
|  | 2048 |  | 
|  | 2049 | bacpy(&rej.bdaddr, &conn->dst); | 
|  | 2050 |  | 
|  | 2051 | /* SCO rejection has its own limited set of | 
|  | 2052 | * allowed error values (0x0D-0x0F) which isn't | 
|  | 2053 | * compatible with most values passed to this | 
|  | 2054 | * function. To be safe, hard-code one of the | 
|  | 2055 | * values that's suitable for SCO. | 
|  | 2056 | */ | 
| Frédéric Dalleau | 3c0975a | 2016-09-08 12:00:11 +0200 | [diff] [blame] | 2057 | rej.reason = HCI_ERROR_REJ_LIMITED_RESOURCES; | 
| Johan Hedberg | dcc0f0d | 2015-10-22 10:49:37 +0300 | [diff] [blame] | 2058 |  | 
|  | 2059 | hci_req_add(req, HCI_OP_REJECT_SYNC_CONN_REQ, | 
|  | 2060 | sizeof(rej), &rej); | 
|  | 2061 | } | 
|  | 2062 | break; | 
|  | 2063 | default: | 
|  | 2064 | conn->state = BT_CLOSED; | 
|  | 2065 | break; | 
|  | 2066 | } | 
|  | 2067 | } | 
|  | 2068 |  | 
|  | 2069 | static void abort_conn_complete(struct hci_dev *hdev, u8 status, u16 opcode) | 
|  | 2070 | { | 
|  | 2071 | if (status) | 
|  | 2072 | BT_DBG("Failed to abort connection: status 0x%2.2x", status); | 
|  | 2073 | } | 
|  | 2074 |  | 
|  | 2075 | int hci_abort_conn(struct hci_conn *conn, u8 reason) | 
|  | 2076 | { | 
|  | 2077 | struct hci_request req; | 
|  | 2078 | int err; | 
|  | 2079 |  | 
|  | 2080 | hci_req_init(&req, conn->hdev); | 
|  | 2081 |  | 
|  | 2082 | __hci_abort_conn(&req, conn, reason); | 
|  | 2083 |  | 
|  | 2084 | err = hci_req_run(&req, abort_conn_complete); | 
|  | 2085 | if (err && err != -ENODATA) { | 
| Marcel Holtmann | 2064ee3 | 2017-10-30 10:42:59 +0100 | [diff] [blame] | 2086 | bt_dev_err(conn->hdev, "failed to run HCI request: err %d", err); | 
| Johan Hedberg | dcc0f0d | 2015-10-22 10:49:37 +0300 | [diff] [blame] | 2087 | return err; | 
|  | 2088 | } | 
|  | 2089 |  | 
|  | 2090 | return 0; | 
|  | 2091 | } | 
| Johan Hedberg | 5fc16cc | 2015-11-11 08:11:16 +0200 | [diff] [blame] | 2092 |  | 
| Johan Hedberg | a1d01db | 2015-11-11 08:11:25 +0200 | [diff] [blame] | 2093 | static int update_bg_scan(struct hci_request *req, unsigned long opt) | 
| Johan Hedberg | 2e93e53 | 2015-11-11 08:11:17 +0200 | [diff] [blame] | 2094 | { | 
|  | 2095 | hci_dev_lock(req->hdev); | 
|  | 2096 | __hci_update_background_scan(req); | 
|  | 2097 | hci_dev_unlock(req->hdev); | 
| Johan Hedberg | a1d01db | 2015-11-11 08:11:25 +0200 | [diff] [blame] | 2098 | return 0; | 
| Johan Hedberg | 2e93e53 | 2015-11-11 08:11:17 +0200 | [diff] [blame] | 2099 | } | 
|  | 2100 |  | 
|  | 2101 | static void bg_scan_update(struct work_struct *work) | 
|  | 2102 | { | 
|  | 2103 | struct hci_dev *hdev = container_of(work, struct hci_dev, | 
|  | 2104 | bg_scan_update); | 
| Johan Hedberg | 84235d2 | 2015-11-11 08:11:20 +0200 | [diff] [blame] | 2105 | struct hci_conn *conn; | 
|  | 2106 | u8 status; | 
|  | 2107 | int err; | 
| Johan Hedberg | 2e93e53 | 2015-11-11 08:11:17 +0200 | [diff] [blame] | 2108 |  | 
| Johan Hedberg | 84235d2 | 2015-11-11 08:11:20 +0200 | [diff] [blame] | 2109 | err = hci_req_sync(hdev, update_bg_scan, 0, HCI_CMD_TIMEOUT, &status); | 
|  | 2110 | if (!err) | 
|  | 2111 | return; | 
|  | 2112 |  | 
|  | 2113 | hci_dev_lock(hdev); | 
|  | 2114 |  | 
|  | 2115 | conn = hci_conn_hash_lookup_state(hdev, LE_LINK, BT_CONNECT); | 
|  | 2116 | if (conn) | 
|  | 2117 | hci_le_conn_failed(conn, status); | 
|  | 2118 |  | 
|  | 2119 | hci_dev_unlock(hdev); | 
| Johan Hedberg | 2e93e53 | 2015-11-11 08:11:17 +0200 | [diff] [blame] | 2120 | } | 
|  | 2121 |  | 
| Johan Hedberg | a1d01db | 2015-11-11 08:11:25 +0200 | [diff] [blame] | 2122 | static int le_scan_disable(struct hci_request *req, unsigned long opt) | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2123 | { | 
|  | 2124 | hci_req_add_le_scan_disable(req); | 
| Johan Hedberg | a1d01db | 2015-11-11 08:11:25 +0200 | [diff] [blame] | 2125 | return 0; | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2126 | } | 
|  | 2127 |  | 
| Johan Hedberg | f4a2cb4 | 2015-11-11 12:24:22 +0200 | [diff] [blame] | 2128 | static int bredr_inquiry(struct hci_request *req, unsigned long opt) | 
|  | 2129 | { | 
|  | 2130 | u8 length = opt; | 
| Johan Hedberg | 78b781c | 2016-01-05 13:19:32 +0200 | [diff] [blame] | 2131 | const u8 giac[3] = { 0x33, 0x8b, 0x9e }; | 
|  | 2132 | const u8 liac[3] = { 0x00, 0x8b, 0x9e }; | 
| Johan Hedberg | f4a2cb4 | 2015-11-11 12:24:22 +0200 | [diff] [blame] | 2133 | struct hci_cp_inquiry cp; | 
|  | 2134 |  | 
|  | 2135 | BT_DBG("%s", req->hdev->name); | 
|  | 2136 |  | 
|  | 2137 | hci_dev_lock(req->hdev); | 
|  | 2138 | hci_inquiry_cache_flush(req->hdev); | 
|  | 2139 | hci_dev_unlock(req->hdev); | 
|  | 2140 |  | 
|  | 2141 | memset(&cp, 0, sizeof(cp)); | 
| Johan Hedberg | 78b781c | 2016-01-05 13:19:32 +0200 | [diff] [blame] | 2142 |  | 
|  | 2143 | if (req->hdev->discovery.limited) | 
|  | 2144 | memcpy(&cp.lap, liac, sizeof(cp.lap)); | 
|  | 2145 | else | 
|  | 2146 | memcpy(&cp.lap, giac, sizeof(cp.lap)); | 
|  | 2147 |  | 
| Johan Hedberg | f4a2cb4 | 2015-11-11 12:24:22 +0200 | [diff] [blame] | 2148 | cp.length = length; | 
|  | 2149 |  | 
|  | 2150 | hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp); | 
|  | 2151 |  | 
|  | 2152 | return 0; | 
|  | 2153 | } | 
|  | 2154 |  | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2155 | static void le_scan_disable_work(struct work_struct *work) | 
|  | 2156 | { | 
|  | 2157 | struct hci_dev *hdev = container_of(work, struct hci_dev, | 
|  | 2158 | le_scan_disable.work); | 
|  | 2159 | u8 status; | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2160 |  | 
|  | 2161 | BT_DBG("%s", hdev->name); | 
|  | 2162 |  | 
| Johan Hedberg | f4a2cb4 | 2015-11-11 12:24:22 +0200 | [diff] [blame] | 2163 | if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2164 | return; | 
|  | 2165 |  | 
| Johan Hedberg | f4a2cb4 | 2015-11-11 12:24:22 +0200 | [diff] [blame] | 2166 | cancel_delayed_work(&hdev->le_scan_restart); | 
|  | 2167 |  | 
|  | 2168 | hci_req_sync(hdev, le_scan_disable, 0, HCI_CMD_TIMEOUT, &status); | 
|  | 2169 | if (status) { | 
| Marcel Holtmann | 2064ee3 | 2017-10-30 10:42:59 +0100 | [diff] [blame] | 2170 | bt_dev_err(hdev, "failed to disable LE scan: status 0x%02x", | 
|  | 2171 | status); | 
| Johan Hedberg | f4a2cb4 | 2015-11-11 12:24:22 +0200 | [diff] [blame] | 2172 | return; | 
|  | 2173 | } | 
|  | 2174 |  | 
|  | 2175 | hdev->discovery.scan_start = 0; | 
|  | 2176 |  | 
|  | 2177 | /* If we were running LE only scan, change discovery state. If | 
|  | 2178 | * we were running both LE and BR/EDR inquiry simultaneously, | 
|  | 2179 | * and BR/EDR inquiry is already finished, stop discovery, | 
|  | 2180 | * otherwise BR/EDR inquiry will stop discovery when finished. | 
|  | 2181 | * If we are resolving a remote device name, do not change | 
|  | 2182 | * the discovery state. | 
|  | 2183 | */ | 
|  | 2184 |  | 
|  | 2185 | if (hdev->discovery.type == DISCOV_TYPE_LE) | 
|  | 2186 | goto discov_stopped; | 
|  | 2187 |  | 
|  | 2188 | if (hdev->discovery.type != DISCOV_TYPE_INTERLEAVED) | 
|  | 2189 | return; | 
|  | 2190 |  | 
|  | 2191 | if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks)) { | 
|  | 2192 | if (!test_bit(HCI_INQUIRY, &hdev->flags) && | 
|  | 2193 | hdev->discovery.state != DISCOVERY_RESOLVING) | 
|  | 2194 | goto discov_stopped; | 
|  | 2195 |  | 
|  | 2196 | return; | 
|  | 2197 | } | 
|  | 2198 |  | 
|  | 2199 | hci_req_sync(hdev, bredr_inquiry, DISCOV_INTERLEAVED_INQUIRY_LEN, | 
|  | 2200 | HCI_CMD_TIMEOUT, &status); | 
|  | 2201 | if (status) { | 
| Marcel Holtmann | 2064ee3 | 2017-10-30 10:42:59 +0100 | [diff] [blame] | 2202 | bt_dev_err(hdev, "inquiry failed: status 0x%02x", status); | 
| Johan Hedberg | f4a2cb4 | 2015-11-11 12:24:22 +0200 | [diff] [blame] | 2203 | goto discov_stopped; | 
|  | 2204 | } | 
|  | 2205 |  | 
|  | 2206 | return; | 
|  | 2207 |  | 
|  | 2208 | discov_stopped: | 
|  | 2209 | hci_dev_lock(hdev); | 
|  | 2210 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); | 
|  | 2211 | hci_dev_unlock(hdev); | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2212 | } | 
|  | 2213 |  | 
| Johan Hedberg | 3dfe590 | 2015-11-11 12:24:23 +0200 | [diff] [blame] | 2214 | static int le_scan_restart(struct hci_request *req, unsigned long opt) | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2215 | { | 
| Johan Hedberg | 3dfe590 | 2015-11-11 12:24:23 +0200 | [diff] [blame] | 2216 | struct hci_dev *hdev = req->hdev; | 
| Johan Hedberg | 3dfe590 | 2015-11-11 12:24:23 +0200 | [diff] [blame] | 2217 |  | 
|  | 2218 | /* If the controller is not scanning, we are done. */ | 
|  | 2219 | if (!hci_dev_test_flag(hdev, HCI_LE_SCAN)) | 
|  | 2220 | return 0; | 
|  | 2221 |  | 
|  | 2222 | hci_req_add_le_scan_disable(req); | 
|  | 2223 |  | 
| Jaganath Kanakkassery | a2344b9 | 2018-07-06 17:05:28 +0530 | [diff] [blame] | 2224 | if (use_ext_scan(hdev)) { | 
|  | 2225 | struct hci_cp_le_set_ext_scan_enable ext_enable_cp; | 
|  | 2226 |  | 
|  | 2227 | memset(&ext_enable_cp, 0, sizeof(ext_enable_cp)); | 
|  | 2228 | ext_enable_cp.enable = LE_SCAN_ENABLE; | 
|  | 2229 | ext_enable_cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; | 
|  | 2230 |  | 
|  | 2231 | hci_req_add(req, HCI_OP_LE_SET_EXT_SCAN_ENABLE, | 
|  | 2232 | sizeof(ext_enable_cp), &ext_enable_cp); | 
|  | 2233 | } else { | 
|  | 2234 | struct hci_cp_le_set_scan_enable cp; | 
|  | 2235 |  | 
|  | 2236 | memset(&cp, 0, sizeof(cp)); | 
|  | 2237 | cp.enable = LE_SCAN_ENABLE; | 
|  | 2238 | cp.filter_dup = LE_SCAN_FILTER_DUP_ENABLE; | 
|  | 2239 | hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp); | 
|  | 2240 | } | 
| Johan Hedberg | 3dfe590 | 2015-11-11 12:24:23 +0200 | [diff] [blame] | 2241 |  | 
|  | 2242 | return 0; | 
|  | 2243 | } | 
|  | 2244 |  | 
|  | 2245 | static void le_scan_restart_work(struct work_struct *work) | 
|  | 2246 | { | 
|  | 2247 | struct hci_dev *hdev = container_of(work, struct hci_dev, | 
|  | 2248 | le_scan_restart.work); | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2249 | unsigned long timeout, duration, scan_start, now; | 
| Johan Hedberg | 3dfe590 | 2015-11-11 12:24:23 +0200 | [diff] [blame] | 2250 | u8 status; | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2251 |  | 
|  | 2252 | BT_DBG("%s", hdev->name); | 
|  | 2253 |  | 
| Johan Hedberg | 3dfe590 | 2015-11-11 12:24:23 +0200 | [diff] [blame] | 2254 | hci_req_sync(hdev, le_scan_restart, 0, HCI_CMD_TIMEOUT, &status); | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2255 | if (status) { | 
| Marcel Holtmann | 2064ee3 | 2017-10-30 10:42:59 +0100 | [diff] [blame] | 2256 | bt_dev_err(hdev, "failed to restart LE scan: status %d", | 
|  | 2257 | status); | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2258 | return; | 
|  | 2259 | } | 
|  | 2260 |  | 
|  | 2261 | hci_dev_lock(hdev); | 
|  | 2262 |  | 
|  | 2263 | if (!test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) || | 
|  | 2264 | !hdev->discovery.scan_start) | 
|  | 2265 | goto unlock; | 
|  | 2266 |  | 
|  | 2267 | /* When the scan was started, hdev->le_scan_disable has been queued | 
|  | 2268 | * after duration from scan_start. During scan restart this job | 
|  | 2269 | * has been canceled, and we need to queue it again after proper | 
|  | 2270 | * timeout, to make sure that scan does not run indefinitely. | 
|  | 2271 | */ | 
|  | 2272 | duration = hdev->discovery.scan_duration; | 
|  | 2273 | scan_start = hdev->discovery.scan_start; | 
|  | 2274 | now = jiffies; | 
|  | 2275 | if (now - scan_start <= duration) { | 
|  | 2276 | int elapsed; | 
|  | 2277 |  | 
|  | 2278 | if (now >= scan_start) | 
|  | 2279 | elapsed = now - scan_start; | 
|  | 2280 | else | 
|  | 2281 | elapsed = ULONG_MAX - scan_start + now; | 
|  | 2282 |  | 
|  | 2283 | timeout = duration - elapsed; | 
|  | 2284 | } else { | 
|  | 2285 | timeout = 0; | 
|  | 2286 | } | 
|  | 2287 |  | 
|  | 2288 | queue_delayed_work(hdev->req_workqueue, | 
|  | 2289 | &hdev->le_scan_disable, timeout); | 
|  | 2290 |  | 
|  | 2291 | unlock: | 
|  | 2292 | hci_dev_unlock(hdev); | 
|  | 2293 | } | 
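The elapsed-time computation above handles jiffies wraparound by hand: when now has wrapped past scan_start, the elapsed span becomes ULONG_MAX - scan_start + now. A compact sketch of the same arithmetic with small assumed tick counts:

    #include <limits.h>
    #include <stdio.h>

    /* Mirrors the timeout recalculation in le_scan_restart_work() */
    static unsigned long remaining(unsigned long now, unsigned long scan_start,
                                   unsigned long duration)
    {
        unsigned long elapsed;

        if (now - scan_start > duration)    /* unsigned diff is wrap-safe */
            return 0;

        if (now >= scan_start)
            elapsed = now - scan_start;
        else
            elapsed = ULONG_MAX - scan_start + now;

        return duration - elapsed;
    }

    int main(void)
    {
        /* plain case: 300 ticks into a 1000-tick scan, 700 left */
        printf("%lu\n", remaining(1300, 1000, 1000));             /* -> 700 */
        /* wrapped case: scan started just before the counter rolled over */
        printf("%lu\n", remaining(99, ULONG_MAX - 100, 1000));    /* -> 801 */
        return 0;
    }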
|  | 2294 |  | 
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2295 | static int active_scan(struct hci_request *req, unsigned long opt) | 
|  | 2296 | { | 
|  | 2297 | uint16_t interval = opt; | 
|  | 2298 | struct hci_dev *hdev = req->hdev; | 
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2299 | u8 own_addr_type; | 
|  | 2300 | int err; | 
|  | 2301 |  | 
|  | 2302 | BT_DBG("%s", hdev->name); | 
|  | 2303 |  | 
|  | 2304 | if (hci_dev_test_flag(hdev, HCI_LE_ADV)) { | 
|  | 2305 | hci_dev_lock(hdev); | 
|  | 2306 |  | 
|  | 2307 | /* Don't let discovery abort an outgoing connection attempt | 
|  | 2308 | * that's using directed advertising. | 
|  | 2309 | */ | 
|  | 2310 | if (hci_lookup_le_connect(hdev)) { | 
|  | 2311 | hci_dev_unlock(hdev); | 
|  | 2312 | return -EBUSY; | 
|  | 2313 | } | 
|  | 2314 |  | 
|  | 2315 | cancel_adv_timeout(hdev); | 
|  | 2316 | hci_dev_unlock(hdev); | 
|  | 2317 |  | 
| Jaganath Kanakkassery | 94386b6 | 2017-12-11 20:26:47 +0530 | [diff] [blame] | 2318 | __hci_req_disable_advertising(req); | 
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2319 | } | 
|  | 2320 |  | 
|  | 2321 | /* If controller is scanning, it means the background scanning is | 
|  | 2322 | * running. Thus, we should temporarily stop it in order to set the | 
|  | 2323 | * discovery scanning parameters. | 
|  | 2324 | */ | 
|  | 2325 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) | 
|  | 2326 | hci_req_add_le_scan_disable(req); | 
|  | 2327 |  | 
|  | 2328 | /* All active scans will be done with either a resolvable private | 
|  | 2329 | * address (when the privacy feature has been enabled) or a | 
|  | 2330 | * non-resolvable private address. | 
|  | 2331 | */ | 
| Johan Hedberg | 82a37ad | 2016-03-09 17:30:34 +0200 | [diff] [blame] | 2332 | err = hci_update_random_address(req, true, scan_use_rpa(hdev), | 
|  | 2333 | &own_addr_type); | 
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2334 | if (err < 0) | 
|  | 2335 | own_addr_type = ADDR_LE_DEV_PUBLIC; | 
|  | 2336 |  | 
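|  |  | /* Queue the scan parameter and scan enable commands. The interval | 
|  |  | * comes from 'opt' and the window is fixed at DISCOV_LE_SCAN_WIN, | 
|  |  | * both in units of 0.625 ms. The final argument is the scan filter | 
|  |  | * policy, where 0x00 accepts all advertisements. | 
|  |  | */ | 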
| Jaganath Kanakkassery | 3baef81 | 2018-07-06 17:05:27 +0530 | [diff] [blame] | 2337 | hci_req_start_scan(req, LE_SCAN_ACTIVE, interval, DISCOV_LE_SCAN_WIN, | 
|  | 2338 | own_addr_type, 0); | 
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2339 | return 0; | 
|  | 2340 | } | 
|  | 2341 |  | 
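|  |  | /* Queue an LE active scan and a BR/EDR inquiry in a single request. | 
|  |  | * This is only used when the controller can run both at the same | 
|  |  | * time (see HCI_QUIRK_SIMULTANEOUS_DISCOVERY below). | 
|  |  | */ | 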
|  | 2342 | static int interleaved_discov(struct hci_request *req, unsigned long opt) | 
|  | 2343 | { | 
|  | 2344 | int err; | 
|  | 2345 |  | 
|  | 2346 | BT_DBG("%s", req->hdev->name); | 
|  | 2347 |  | 
|  | 2348 | err = active_scan(req, opt); | 
|  | 2349 | if (err) | 
|  | 2350 | return err; | 
|  | 2351 |  | 
| Johan Hedberg | 7df26b5 | 2015-11-11 12:24:21 +0200 | [diff] [blame] | 2352 | return bredr_inquiry(req, DISCOV_BREDR_INQUIRY_LEN); | 
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2353 | } | 
|  | 2354 |  | 
|  | 2355 | static void start_discovery(struct hci_dev *hdev, u8 *status) | 
|  | 2356 | { | 
|  | 2357 | unsigned long timeout; | 
|  | 2358 |  | 
|  | 2359 | BT_DBG("%s type %u", hdev->name, hdev->discovery.type); | 
|  | 2360 |  | 
|  | 2361 | switch (hdev->discovery.type) { | 
|  | 2362 | case DISCOV_TYPE_BREDR: | 
|  | 2363 | if (!hci_dev_test_flag(hdev, HCI_INQUIRY)) | 
| Johan Hedberg | 7df26b5 | 2015-11-11 12:24:21 +0200 | [diff] [blame] | 2364 | hci_req_sync(hdev, bredr_inquiry, | 
|  | 2365 | DISCOV_BREDR_INQUIRY_LEN, HCI_CMD_TIMEOUT, | 
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2366 | status); | 
|  | 2367 | return; | 
|  | 2368 | case DISCOV_TYPE_INTERLEAVED: | 
|  | 2369 | /* When running simultaneous discovery, the LE scanning time | 
|  | 2370 | * should occupy the whole discovery time since BR/EDR inquiry | 
|  | 2371 | * and LE scanning are scheduled by the controller. | 
|  | 2372 | * | 
|  | 2373 | * For interleaved discovery, in comparison, BR/EDR inquiry | 
|  | 2374 | * and LE scanning are done sequentially with separate | 
|  | 2375 | * timeouts. | 
|  | 2376 | */ | 
|  | 2377 | if (test_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, | 
|  | 2378 | &hdev->quirks)) { | 
|  | 2379 | timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); | 
|  | 2380 | /* During simultaneous discovery we double the LE scan | 
|  | 2381 | * interval to leave the controller enough time to also | 
|  | 2382 | * perform BR/EDR inquiry. | 
|  | 2383 | */ | 
|  | 2384 | hci_req_sync(hdev, interleaved_discov, | 
|  | 2385 | DISCOV_LE_SCAN_INT * 2, HCI_CMD_TIMEOUT, | 
|  | 2386 | status); | 
|  | 2387 | break; | 
|  | 2388 | } | 
|  | 2389 |  | 
|  | 2390 | timeout = msecs_to_jiffies(hdev->discov_interleaved_timeout); | 
|  | 2391 | hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT, | 
|  | 2392 | HCI_CMD_TIMEOUT, status); | 
|  | 2393 | break; | 
|  | 2394 | case DISCOV_TYPE_LE: | 
|  | 2395 | timeout = msecs_to_jiffies(DISCOV_LE_TIMEOUT); | 
|  | 2396 | hci_req_sync(hdev, active_scan, DISCOV_LE_SCAN_INT, | 
|  | 2397 | HCI_CMD_TIMEOUT, status); | 
|  | 2398 | break; | 
|  | 2399 | default: | 
|  | 2400 | *status = HCI_ERROR_UNSPECIFIED; | 
|  | 2401 | return; | 
|  | 2402 | } | 
|  | 2403 |  | 
|  | 2404 | if (*status) | 
|  | 2405 | return; | 
|  | 2406 |  | 
|  | 2407 | BT_DBG("%s timeout %u ms", hdev->name, jiffies_to_msecs(timeout)); | 
|  | 2408 |  | 
|  | 2409 | /* When service discovery is used and the controller has a | 
|  | 2410 | * strict duplicate filter, it is important to remember the | 
|  | 2411 | * start and duration of the scan. This is required for | 
|  | 2412 | * restarting scanning during the discovery phase. | 
|  | 2413 | */ | 
|  | 2414 | if (test_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks) && | 
|  | 2415 | hdev->discovery.result_filtering) { | 
|  | 2416 | hdev->discovery.scan_start = jiffies; | 
|  | 2417 | hdev->discovery.scan_duration = timeout; | 
|  | 2418 | } | 
|  | 2419 |  | 
|  | 2420 | queue_delayed_work(hdev->req_workqueue, &hdev->le_scan_disable, | 
|  | 2421 | timeout); | 
|  | 2422 | } | 
|  | 2423 |  | 
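|  |  | /* Queue the commands needed to stop the current discovery. Returns | 
|  |  | * true if at least one command was queued. An illustrative caller | 
|  |  | * sketch using this file's API: | 
|  |  | * | 
|  |  | *	struct hci_request req; | 
|  |  | * | 
|  |  | *	hci_req_init(&req, hdev); | 
|  |  | *	if (hci_req_stop_discovery(&req)) | 
|  |  | *		hci_req_run(&req, NULL); | 
|  |  | */ | 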
| Johan Hedberg | 2154d3f | 2015-11-11 08:30:45 +0200 | [diff] [blame] | 2424 | bool hci_req_stop_discovery(struct hci_request *req) | 
|  | 2425 | { | 
|  | 2426 | struct hci_dev *hdev = req->hdev; | 
|  | 2427 | struct discovery_state *d = &hdev->discovery; | 
|  | 2428 | struct hci_cp_remote_name_req_cancel cp; | 
|  | 2429 | struct inquiry_entry *e; | 
|  | 2430 | bool ret = false; | 
|  | 2431 |  | 
|  | 2432 | BT_DBG("%s state %u", hdev->name, hdev->discovery.state); | 
|  | 2433 |  | 
|  | 2434 | if (d->state == DISCOVERY_FINDING || d->state == DISCOVERY_STOPPING) { | 
|  | 2435 | if (test_bit(HCI_INQUIRY, &hdev->flags)) | 
|  | 2436 | hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL); | 
|  | 2437 |  | 
|  | 2438 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { | 
|  | 2439 | cancel_delayed_work(&hdev->le_scan_disable); | 
|  | 2440 | hci_req_add_le_scan_disable(req); | 
|  | 2441 | } | 
|  | 2442 |  | 
|  | 2443 | ret = true; | 
|  | 2444 | } else { | 
|  | 2445 | /* Passive scanning */ | 
|  | 2446 | if (hci_dev_test_flag(hdev, HCI_LE_SCAN)) { | 
|  | 2447 | hci_req_add_le_scan_disable(req); | 
|  | 2448 | ret = true; | 
|  | 2449 | } | 
|  | 2450 | } | 
|  | 2451 |  | 
|  | 2452 | /* No further actions needed for LE-only discovery */ | 
|  | 2453 | if (d->type == DISCOV_TYPE_LE) | 
|  | 2454 | return ret; | 
|  | 2455 |  | 
|  | 2456 | if (d->state == DISCOVERY_RESOLVING || d->state == DISCOVERY_STOPPING) { | 
|  | 2457 | e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, | 
|  | 2458 | NAME_PENDING); | 
|  | 2459 | if (!e) | 
|  | 2460 | return ret; | 
|  | 2461 |  | 
|  | 2462 | bacpy(&cp.bdaddr, &e->data.bdaddr); | 
|  | 2463 | hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp), | 
|  | 2464 | &cp); | 
|  | 2465 | ret = true; | 
|  | 2466 | } | 
|  | 2467 |  | 
|  | 2468 | return ret; | 
|  | 2469 | } | 
|  | 2470 |  | 
|  | 2471 | static int stop_discovery(struct hci_request *req, unsigned long opt) | 
|  | 2472 | { | 
|  | 2473 | hci_dev_lock(req->hdev); | 
|  | 2474 | hci_req_stop_discovery(req); | 
|  | 2475 | hci_dev_unlock(req->hdev); | 
|  | 2476 |  | 
|  | 2477 | return 0; | 
|  | 2478 | } | 
|  | 2479 |  | 
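|  |  | /* Work callback driving the discovery state machine: start or stop | 
|  |  | * discovery depending on hdev->discovery.state and report the | 
|  |  | * result back to the management interface. | 
|  |  | */ | 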
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2480 | static void discov_update(struct work_struct *work) | 
|  | 2481 | { | 
|  | 2482 | struct hci_dev *hdev = container_of(work, struct hci_dev, | 
|  | 2483 | discov_update); | 
|  | 2484 | u8 status = 0; | 
|  | 2485 |  | 
|  | 2486 | switch (hdev->discovery.state) { | 
|  | 2487 | case DISCOVERY_STARTING: | 
|  | 2488 | start_discovery(hdev, &status); | 
|  | 2489 | mgmt_start_discovery_complete(hdev, status); | 
|  | 2490 | if (status) | 
|  | 2491 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); | 
|  | 2492 | else | 
|  | 2493 | hci_discovery_set_state(hdev, DISCOVERY_FINDING); | 
|  | 2494 | break; | 
| Johan Hedberg | 2154d3f | 2015-11-11 08:30:45 +0200 | [diff] [blame] | 2495 | case DISCOVERY_STOPPING: | 
|  | 2496 | hci_req_sync(hdev, stop_discovery, 0, HCI_CMD_TIMEOUT, &status); | 
|  | 2497 | mgmt_stop_discovery_complete(hdev, status); | 
|  | 2498 | if (!status) | 
|  | 2499 | hci_discovery_set_state(hdev, DISCOVERY_STOPPED); | 
|  | 2500 | break; | 
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2501 | case DISCOVERY_STOPPED: | 
|  | 2502 | default: | 
|  | 2503 | return; | 
|  | 2504 | } | 
|  | 2505 | } | 
|  | 2506 |  | 
| Johan Hedberg | c366f55 | 2015-11-23 15:43:06 +0200 | [diff] [blame] | 2507 | static void discov_off(struct work_struct *work) | 
|  | 2508 | { | 
|  | 2509 | struct hci_dev *hdev = container_of(work, struct hci_dev, | 
|  | 2510 | discov_off.work); | 
|  | 2511 |  | 
|  | 2512 | BT_DBG("%s", hdev->name); | 
|  | 2513 |  | 
|  | 2514 | hci_dev_lock(hdev); | 
|  | 2515 |  | 
|  | 2516 | /* When the discoverable timeout triggers, just make sure | 
|  | 2517 | * the limited discoverable flag is cleared. Even in the case | 
|  | 2518 | * of a timeout triggered from general discoverable, it is | 
|  | 2519 | * safe to unconditionally clear the flag. | 
|  | 2520 | */ | 
|  | 2521 | hci_dev_clear_flag(hdev, HCI_LIMITED_DISCOVERABLE); | 
|  | 2522 | hci_dev_clear_flag(hdev, HCI_DISCOVERABLE); | 
|  | 2523 | hdev->discov_timeout = 0; | 
|  | 2524 |  | 
|  | 2525 | hci_dev_unlock(hdev); | 
|  | 2526 |  | 
|  | 2527 | hci_req_sync(hdev, discoverable_update, 0, HCI_CMD_TIMEOUT, NULL); | 
|  | 2528 | mgmt_new_settings(hdev); | 
|  | 2529 | } | 
|  | 2530 |  | 
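|  |  | /* Bring the controller in sync with the stored host settings after | 
|  |  | * power on: SSP, Secure Connections, LE host support, advertising | 
|  |  | * state, link security and the BR/EDR scan/class/name/EIR values. | 
|  |  | */ | 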
| Johan Hedberg | 2ff1389 | 2015-11-25 16:15:44 +0200 | [diff] [blame] | 2531 | static int powered_update_hci(struct hci_request *req, unsigned long opt) | 
|  | 2532 | { | 
|  | 2533 | struct hci_dev *hdev = req->hdev; | 
| Johan Hedberg | 2ff1389 | 2015-11-25 16:15:44 +0200 | [diff] [blame] | 2534 | u8 link_sec; | 
|  | 2535 |  | 
|  | 2536 | hci_dev_lock(hdev); | 
|  | 2537 |  | 
|  | 2538 | if (hci_dev_test_flag(hdev, HCI_SSP_ENABLED) && | 
|  | 2539 | !lmp_host_ssp_capable(hdev)) { | 
|  | 2540 | u8 mode = 0x01; | 
|  | 2541 |  | 
|  | 2542 | hci_req_add(req, HCI_OP_WRITE_SSP_MODE, sizeof(mode), &mode); | 
|  | 2543 |  | 
|  | 2544 | if (bredr_sc_enabled(hdev) && !lmp_host_sc_capable(hdev)) { | 
|  | 2545 | u8 support = 0x01; | 
|  | 2546 |  | 
|  | 2547 | hci_req_add(req, HCI_OP_WRITE_SC_SUPPORT, | 
|  | 2548 | sizeof(support), &support); | 
|  | 2549 | } | 
|  | 2550 | } | 
|  | 2551 |  | 
|  | 2552 | if (hci_dev_test_flag(hdev, HCI_LE_ENABLED) && | 
|  | 2553 | lmp_bredr_capable(hdev)) { | 
|  | 2554 | struct hci_cp_write_le_host_supported cp; | 
|  | 2555 |  | 
|  | 2556 | cp.le = 0x01; | 
|  | 2557 | cp.simul = 0x00; | 
|  | 2558 |  | 
|  | 2559 | /* Check first if we already have the right | 
|  | 2560 | * host state (host features set) | 
|  | 2561 | */ | 
|  | 2562 | if (cp.le != lmp_host_le_capable(hdev) || | 
|  | 2563 | cp.simul != lmp_host_le_br_capable(hdev)) | 
|  | 2564 | hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, | 
|  | 2565 | sizeof(cp), &cp); | 
|  | 2566 | } | 
|  | 2567 |  | 
| Johan Hedberg | d6b7e2c | 2015-11-30 11:21:44 +0200 | [diff] [blame] | 2568 | if (hci_dev_test_flag(hdev, HCI_LE_ENABLED)) { | 
| Johan Hedberg | 2ff1389 | 2015-11-25 16:15:44 +0200 | [diff] [blame] | 2569 | /* Make sure the controller has a good default for | 
|  | 2570 | * advertising data. This also applies to the case | 
|  | 2571 | * where BR/EDR was toggled during the AUTO_OFF phase. | 
|  | 2572 | */ | 
| Johan Hedberg | d6b7e2c | 2015-11-30 11:21:44 +0200 | [diff] [blame] | 2573 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING) || | 
|  | 2574 | list_empty(&hdev->adv_instances)) { | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 2575 | int err; | 
|  | 2576 |  | 
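|  |  | /* Controllers with LE extended advertising support use the | 
|  |  | * extended command set; all others fall back to the legacy | 
|  |  | * advertising data commands. | 
|  |  | */ | 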
|  | 2577 | if (ext_adv_capable(hdev)) { | 
|  | 2578 | err = __hci_req_setup_ext_adv_instance(req, | 
|  | 2579 | 0x00); | 
|  | 2580 | if (!err) | 
|  | 2581 | __hci_req_update_scan_rsp_data(req, | 
|  | 2582 | 0x00); | 
|  | 2583 | } else { | 
|  | 2584 | err = 0; | 
|  | 2585 | __hci_req_update_adv_data(req, 0x00); | 
|  | 2586 | __hci_req_update_scan_rsp_data(req, 0x00); | 
|  | 2587 | } | 
| Johan Hedberg | 2ff1389 | 2015-11-25 16:15:44 +0200 | [diff] [blame] | 2588 |  | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 2589 | if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) { | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 2590 | if (!ext_adv_capable(hdev)) | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 2591 | __hci_req_enable_advertising(req); | 
| Jaganath Kanakkassery | a0fb372 | 2018-07-19 17:09:42 +0530 | [diff] [blame] | 2592 | else if (!err) | 
|  | 2593 | __hci_req_enable_ext_advertising(req); | 
| Jaganath Kanakkassery | de181e8 | 2018-07-19 17:09:41 +0530 | [diff] [blame] | 2594 | } | 
| Johan Hedberg | d6b7e2c | 2015-11-30 11:21:44 +0200 | [diff] [blame] | 2595 | } else if (!list_empty(&hdev->adv_instances)) { | 
|  | 2596 | struct adv_info *adv_instance; | 
|  | 2597 |  | 
| Johan Hedberg | 2ff1389 | 2015-11-25 16:15:44 +0200 | [diff] [blame] | 2598 | adv_instance = list_first_entry(&hdev->adv_instances, | 
|  | 2599 | struct adv_info, list); | 
| Johan Hedberg | 2ff1389 | 2015-11-25 16:15:44 +0200 | [diff] [blame] | 2600 | __hci_req_schedule_adv_instance(req, | 
| Johan Hedberg | d6b7e2c | 2015-11-30 11:21:44 +0200 | [diff] [blame] | 2601 | adv_instance->instance, | 
| Johan Hedberg | 2ff1389 | 2015-11-25 16:15:44 +0200 | [diff] [blame] | 2602 | true); | 
| Johan Hedberg | d6b7e2c | 2015-11-30 11:21:44 +0200 | [diff] [blame] | 2603 | } | 
| Johan Hedberg | 2ff1389 | 2015-11-25 16:15:44 +0200 | [diff] [blame] | 2604 | } | 
|  | 2605 |  | 
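|  |  | /* Sync the link-level authentication setting with the controller | 
|  |  | * if it differs from the stored host preference. | 
|  |  | */ | 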
|  | 2606 | link_sec = hci_dev_test_flag(hdev, HCI_LINK_SECURITY); | 
|  | 2607 | if (link_sec != test_bit(HCI_AUTH, &hdev->flags)) | 
|  | 2608 | hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, | 
|  | 2609 | sizeof(link_sec), &link_sec); | 
|  | 2610 |  | 
|  | 2611 | if (lmp_bredr_capable(hdev)) { | 
|  | 2612 | if (hci_dev_test_flag(hdev, HCI_FAST_CONNECTABLE)) | 
|  | 2613 | __hci_req_write_fast_connectable(req, true); | 
|  | 2614 | else | 
|  | 2615 | __hci_req_write_fast_connectable(req, false); | 
|  | 2616 | __hci_req_update_scan(req); | 
|  | 2617 | __hci_req_update_class(req); | 
|  | 2618 | __hci_req_update_name(req); | 
|  | 2619 | __hci_req_update_eir(req); | 
|  | 2620 | } | 
|  | 2621 |  | 
|  | 2622 | hci_dev_unlock(hdev); | 
|  | 2623 | return 0; | 
|  | 2624 | } | 
|  | 2625 |  | 
|  | 2626 | int __hci_req_hci_power_on(struct hci_dev *hdev) | 
|  | 2627 | { | 
|  | 2628 | /* Register the available SMP channels (BR/EDR and LE) only when | 
|  | 2629 | * successfully powering on the controller. This late | 
|  | 2630 | * registration is required so that LE SMP can clearly decide | 
|  | 2631 | * whether the public address or the static address is used. | 
|  | 2632 | */ | 
|  | 2633 | smp_register(hdev); | 
|  | 2634 |  | 
|  | 2635 | return __hci_req_sync(hdev, powered_update_hci, 0, HCI_CMD_TIMEOUT, | 
|  | 2636 | NULL); | 
|  | 2637 | } | 
|  | 2638 |  | 
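|  |  | /* Hook up the work callbacks used by the request machinery. The | 
|  |  | * counterpart hci_request_cancel_all() below cancels them again on | 
|  |  | * teardown. | 
|  |  | */ | 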
| Johan Hedberg | 5fc16cc | 2015-11-11 08:11:16 +0200 | [diff] [blame] | 2639 | void hci_request_setup(struct hci_dev *hdev) | 
|  | 2640 | { | 
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2641 | INIT_WORK(&hdev->discov_update, discov_update); | 
| Johan Hedberg | 2e93e53 | 2015-11-11 08:11:17 +0200 | [diff] [blame] | 2642 | INIT_WORK(&hdev->bg_scan_update, bg_scan_update); | 
| Johan Hedberg | 01b1cb8 | 2015-11-16 12:52:21 +0200 | [diff] [blame] | 2643 | INIT_WORK(&hdev->scan_update, scan_update_work); | 
| Johan Hedberg | 53c0ba7 | 2015-11-22 16:43:43 +0300 | [diff] [blame] | 2644 | INIT_WORK(&hdev->connectable_update, connectable_update_work); | 
| Johan Hedberg | aed1a88 | 2015-11-22 17:24:44 +0300 | [diff] [blame] | 2645 | INIT_WORK(&hdev->discoverable_update, discoverable_update_work); | 
| Johan Hedberg | c366f55 | 2015-11-23 15:43:06 +0200 | [diff] [blame] | 2646 | INIT_DELAYED_WORK(&hdev->discov_off, discov_off); | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2647 | INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work); | 
|  | 2648 | INIT_DELAYED_WORK(&hdev->le_scan_restart, le_scan_restart_work); | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 2649 | INIT_DELAYED_WORK(&hdev->adv_instance_expire, adv_timeout_expire); | 
| Johan Hedberg | 5fc16cc | 2015-11-11 08:11:16 +0200 | [diff] [blame] | 2650 | } | 
|  | 2651 |  | 
|  | 2652 | void hci_request_cancel_all(struct hci_dev *hdev) | 
|  | 2653 | { | 
| Johan Hedberg | 7df0f73 | 2015-11-12 15:15:00 +0200 | [diff] [blame] | 2654 | hci_req_sync_cancel(hdev, ENODEV); | 
|  | 2655 |  | 
| Johan Hedberg | e68f072 | 2015-11-11 08:30:30 +0200 | [diff] [blame] | 2656 | cancel_work_sync(&hdev->discov_update); | 
| Johan Hedberg | 2e93e53 | 2015-11-11 08:11:17 +0200 | [diff] [blame] | 2657 | cancel_work_sync(&hdev->bg_scan_update); | 
| Johan Hedberg | 01b1cb8 | 2015-11-16 12:52:21 +0200 | [diff] [blame] | 2658 | cancel_work_sync(&hdev->scan_update); | 
| Johan Hedberg | 53c0ba7 | 2015-11-22 16:43:43 +0300 | [diff] [blame] | 2659 | cancel_work_sync(&hdev->connectable_update); | 
| Johan Hedberg | aed1a88 | 2015-11-22 17:24:44 +0300 | [diff] [blame] | 2660 | cancel_work_sync(&hdev->discoverable_update); | 
| Johan Hedberg | c366f55 | 2015-11-23 15:43:06 +0200 | [diff] [blame] | 2661 | cancel_delayed_work_sync(&hdev->discov_off); | 
| Johan Hedberg | 7c1fbed | 2015-11-11 08:11:23 +0200 | [diff] [blame] | 2662 | cancel_delayed_work_sync(&hdev->le_scan_disable); | 
|  | 2663 | cancel_delayed_work_sync(&hdev->le_scan_restart); | 
| Johan Hedberg | f225257 | 2015-11-18 12:49:20 +0200 | [diff] [blame] | 2664 |  | 
|  | 2665 | if (hdev->adv_instance_timeout) { | 
|  | 2666 | cancel_delayed_work_sync(&hdev->adv_instance_expire); | 
|  | 2667 | hdev->adv_instance_timeout = 0; | 
|  | 2668 | } | 
| Johan Hedberg | 5fc16cc | 2015-11-11 08:11:16 +0200 | [diff] [blame] | 2669 | } |