blob: 7c323bd112ff68e16fbdebeb9184fd038ccc4801 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02004 Copyright (C) 2011 ProFUSION Embedded Systems
Linus Torvalds1da177e2005-04-16 15:20:36 -07005
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
11
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090016 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070019 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090021 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070023 SOFTWARE IS DISCLAIMED.
24*/
25
26/* Bluetooth HCI core. */
27
Gustavo Padovan8c520a52012-05-23 04:04:22 -030028#include <linux/export.h>
Sasha Levin3df92b32012-05-27 22:36:56 +020029#include <linux/idr.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Marcel Holtmann611b30f2009-06-08 14:41:38 +020031#include <linux/rfkill.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070032
33#include <net/bluetooth/bluetooth.h>
34#include <net/bluetooth/hci_core.h>
35
/* Deferred work handlers for the receive, command and transmit paths
 * (bodies defined later in this file).
 */
static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);
Linus Torvalds1da177e2005-04-16 15:20:36 -070051/* ---- HCI notifications ---- */
52
/* Forward a device event (register/unregister, up/down, ...) to the
 * HCI socket layer so user-space listeners are informed.
 */
static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}
57
58/* ---- HCI requests ---- */
59
Johan Hedberg42c6b122013-03-05 20:37:49 +020060static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
Linus Torvalds1da177e2005-04-16 15:20:36 -070061{
Johan Hedberg42c6b122013-03-05 20:37:49 +020062 BT_DBG("%s result 0x%2.2x", hdev->name, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
64 if (hdev->req_status == HCI_REQ_PEND) {
65 hdev->req_result = result;
66 hdev->req_status = HCI_REQ_DONE;
67 wake_up_interruptible(&hdev->req_wait_q);
68 }
69}
70
71static void hci_req_cancel(struct hci_dev *hdev, int err)
72{
73 BT_DBG("%s err 0x%2.2x", hdev->name, err);
74
75 if (hdev->req_status == HCI_REQ_PEND) {
76 hdev->req_result = err;
77 hdev->req_status = HCI_REQ_CANCELED;
78 wake_up_interruptible(&hdev->req_wait_q);
79 }
80}
81
/* Execute request and wait for completion.
 *
 * Builds an hci_request via @func, submits it, and sleeps (interruptibly,
 * up to @timeout jiffies) until hci_req_sync_complete() or
 * hci_req_cancel() wakes us. Caller must hold the request lock
 * (see hci_req_sync()).
 *
 * Returns 0 on success, a negative errno mapped from the controller
 * status on failure, -EINTR if interrupted, or -ETIMEDOUT.
 */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	/* Mark a request as pending before submitting, so the completion
	 * callback knows someone is waiting.
	 */
	hdev->req_status = HCI_REQ_PEND;

	/* Let the caller queue its HCI commands onto the request. */
	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	/* Sleep until completion, cancellation, signal or timeout. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller replied; translate its status to an errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds the (positive) errno set by the canceller. */
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
145
Johan Hedberg01178cd2013-03-05 20:37:41 +0200146static int hci_req_sync(struct hci_dev *hdev,
Johan Hedberg42c6b122013-03-05 20:37:49 +0200147 void (*req)(struct hci_request *req,
148 unsigned long opt),
Johan Hedberg01178cd2013-03-05 20:37:41 +0200149 unsigned long opt, __u32 timeout)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700150{
151 int ret;
152
Marcel Holtmann7c6a3292008-09-12 03:11:54 +0200153 if (!test_bit(HCI_UP, &hdev->flags))
154 return -ENETDOWN;
155
Linus Torvalds1da177e2005-04-16 15:20:36 -0700156 /* Serialize all requests */
157 hci_req_lock(hdev);
Johan Hedberg01178cd2013-03-05 20:37:41 +0200158 ret = __hci_req_sync(hdev, req, opt, timeout);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700159 hci_req_unlock(hdev);
160
161 return ret;
162}
163
Johan Hedberg42c6b122013-03-05 20:37:49 +0200164static void hci_reset_req(struct hci_request *req, unsigned long opt)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700165{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200166 BT_DBG("%s %ld", req->hdev->name, opt);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700167
168 /* Reset device */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200169 set_bit(HCI_RESET, &req->hdev->flags);
170 hci_req_add(req, HCI_OP_RESET, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700171}
172
Johan Hedberg42c6b122013-03-05 20:37:49 +0200173static void bredr_init(struct hci_request *req)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700174{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200175 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200176
Linus Torvalds1da177e2005-04-16 15:20:36 -0700177 /* Read Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200178 hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700179
Marcel Holtmann1143e5a2006-09-23 09:57:20 +0200180 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200181 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200182
183 /* Read BD Address */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200184 hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700185}
186
Johan Hedberg42c6b122013-03-05 20:37:49 +0200187static void amp_init(struct hci_request *req)
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200188{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200189 req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
Andrei Emeltchenko2455a3e2011-12-19 16:31:28 +0200190
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200191 /* Read Local Version */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200192 hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
Andrei Emeltchenko6bcbc482012-03-28 16:31:24 +0300193
194 /* Read Local AMP Info */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200195 hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);
Andrei Emeltchenkoe71dfab2012-09-06 15:05:46 +0300196
197 /* Read Data Blk size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200198 hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
Andrei Emeltchenkoe61ef4992011-12-19 16:31:27 +0200199}
200
/* First-stage controller init. Any driver-supplied "special" commands
 * queued on hdev->driver_init are sent first in their own request, then
 * an optional reset and the type-specific (BR/EDR vs AMP) identity
 * commands are queued on @req.
 */
static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		/* The first command of a request carries the start marker. */
		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Fire the driver commands without waiting for a completion
	 * callback; the synchronous wait applies only to @req.
	 */
	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}
245
/* Second-stage BR/EDR setup: read basic controller parameters, clear
 * event filters, set the connection accept timeout and flush stored
 * link keys. Command order follows the traditional init sequence.
 */
static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs (0x7d00 * 0.625ms) */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	/* Delete all stored link keys (BDADDR_ANY + delete_all). */
	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);

	/* Read page scan parameters */
	if (req->hdev->hci_ver > BLUETOOTH_VER_1_1) {
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_ACTIVITY, 0, NULL);
		hci_req_add(req, HCI_OP_READ_PAGE_SCAN_TYPE, 0, NULL);
	}
}
282
Johan Hedberg42c6b122013-03-05 20:37:49 +0200283static void le_setup(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200284{
285 /* Read LE Buffer Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200286 hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200287
288 /* Read LE Local Supported Features */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200289 hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200290
291 /* Read LE Advertising Channel TX Power */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200292 hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200293
294 /* Read LE White List Size */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200295 hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200296
297 /* Read LE Supported States */
Johan Hedberg42c6b122013-03-05 20:37:49 +0200298 hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200299}
300
301static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
302{
303 if (lmp_ext_inq_capable(hdev))
304 return 0x02;
305
306 if (lmp_inq_rssi_capable(hdev))
307 return 0x01;
308
309 if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
310 hdev->lmp_subver == 0x0757)
311 return 0x01;
312
313 if (hdev->manufacturer == 15) {
314 if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
315 return 0x01;
316 if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
317 return 0x01;
318 if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
319 return 0x01;
320 }
321
322 if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
323 hdev->lmp_subver == 0x1805)
324 return 0x01;
325
326 return 0x00;
327}
328
Johan Hedberg42c6b122013-03-05 20:37:49 +0200329static void hci_setup_inquiry_mode(struct hci_request *req)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200330{
331 u8 mode;
332
Johan Hedberg42c6b122013-03-05 20:37:49 +0200333 mode = hci_get_inquiry_mode(req->hdev);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200334
Johan Hedberg42c6b122013-03-05 20:37:49 +0200335 hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200336}
337
/* Build and queue the Set Event Mask command (and, for LE-capable
 * controllers, LE Set Event Mask) based on the controller's LMP
 * capabilities. Bit positions follow the Bluetooth Core Specification.
 */
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles does not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		/* Reuse the buffer for the LE event mask: enable the five
		 * low LE meta-events (0x1f), everything else off.
		 */
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}
406
/* Second-stage init: capability-dependent setup (BR/EDR, LE, event
 * mask, SSP/EIR, inquiry mode, extended features, link security).
 */
static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	/* Read Local Supported Commands only exists from 1.2 onwards. */
	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			/* SSP is supported but disabled by the host:
			 * clear any stale EIR data.
			 */
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		/* Fetch extended features page 1. */
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}
457
/* Queue Write Default Link Policy with every policy bit the controller's
 * LMP features say it supports (role switch, hold, sniff, park).
 */
static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}
476
/* Sync the controller's LE Host Supported setting with the host policy.
 * Only queues the write when the desired value differs from what the
 * controller currently reports, avoiding a redundant command.
 */
static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	/* memset zeroes the whole wire struct, including any padding. */
	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}
493
Johan Hedberg42c6b122013-03-05 20:37:49 +0200494static void hci_init3_req(struct hci_request *req, unsigned long opt)
Johan Hedberg2177bab2013-03-05 20:37:43 +0200495{
Johan Hedberg42c6b122013-03-05 20:37:49 +0200496 struct hci_dev *hdev = req->hdev;
497
Johan Hedberg2177bab2013-03-05 20:37:43 +0200498 if (hdev->commands[5] & 0x10)
Johan Hedberg42c6b122013-03-05 20:37:49 +0200499 hci_setup_link_policy(req);
Johan Hedberg2177bab2013-03-05 20:37:43 +0200500
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500501 if (lmp_le_capable(hdev)) {
Johan Hedberg42c6b122013-03-05 20:37:49 +0200502 hci_set_le_support(req);
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500503 hci_update_ad(req);
504 }
Johan Hedberg2177bab2013-03-05 20:37:43 +0200505}
506
507static int __hci_init(struct hci_dev *hdev)
508{
509 int err;
510
511 err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
512 if (err < 0)
513 return err;
514
515 /* HCI_BREDR covers both single-mode LE, BR/EDR and dual-mode
516 * BR/EDR/LE type controllers. AMP controllers only need the
517 * first stage init.
518 */
519 if (hdev->dev_type != HCI_BREDR)
520 return 0;
521
522 err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
523 if (err < 0)
524 return err;
525
526 return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
527}
528
/* Request builder: write the scan enable setting (opt carries the
 * inquiry/page scan bits for Write Scan Enable).
 */
static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
538
/* Request builder: write the authentication enable setting (opt carries
 * the Write Authentication Enable parameter).
 */
static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
548
/* Request builder: write the encryption mode (opt carries the Write
 * Encryption Mode parameter).
 */
static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
558
/* Request builder: write the default link policy (opt carries the
 * policy bits, converted to little endian for the wire).
 */
static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
568
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900569/* Get HCI device by index.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700570 * Device is held on return. */
571struct hci_dev *hci_dev_get(int index)
572{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200573 struct hci_dev *hdev = NULL, *d;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700574
575 BT_DBG("%d", index);
576
577 if (index < 0)
578 return NULL;
579
580 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +0200581 list_for_each_entry(d, &hci_dev_list, list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700582 if (d->id == index) {
583 hdev = hci_dev_hold(d);
584 break;
585 }
586 }
587 read_unlock(&hci_dev_list_lock);
588 return hdev;
589}
Linus Torvalds1da177e2005-04-16 15:20:36 -0700590
591/* ---- Inquiry support ---- */
Johan Hedbergff9ef572012-01-04 14:23:45 +0200592
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200593bool hci_discovery_active(struct hci_dev *hdev)
594{
595 struct discovery_state *discov = &hdev->discovery;
596
Andre Guedes6fbe1952012-02-03 17:47:58 -0300597 switch (discov->state) {
Andre Guedes343f9352012-02-17 20:39:37 -0300598 case DISCOVERY_FINDING:
Andre Guedes6fbe1952012-02-03 17:47:58 -0300599 case DISCOVERY_RESOLVING:
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200600 return true;
601
Andre Guedes6fbe1952012-02-03 17:47:58 -0300602 default:
603 return false;
604 }
Johan Hedberg30dc78e2012-01-04 15:44:20 +0200605}
606
/* Move the discovery state machine to @state and emit the matching
 * mgmt "discovering" events. A transition to the same state is a no-op.
 */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		/* STARTING -> STOPPED means discovery never actually
		 * began, so no "stopped discovering" event is sent.
		 */
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}
632
Linus Torvalds1da177e2005-04-16 15:20:36 -0700633static void inquiry_cache_flush(struct hci_dev *hdev)
634{
Johan Hedberg30883512012-01-04 14:16:21 +0200635 struct discovery_state *cache = &hdev->discovery;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200636 struct inquiry_entry *p, *n;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700637
Johan Hedberg561aafb2012-01-04 13:31:59 +0200638 list_for_each_entry_safe(p, n, &cache->all, all) {
639 list_del(&p->all);
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200640 kfree(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700641 }
Johan Hedberg561aafb2012-01-04 13:31:59 +0200642
643 INIT_LIST_HEAD(&cache->unknown);
644 INIT_LIST_HEAD(&cache->resolve);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700645}
646
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300647struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
648 bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700649{
Johan Hedberg30883512012-01-04 14:16:21 +0200650 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700651 struct inquiry_entry *e;
652
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300653 BT_DBG("cache %p, %pMR", cache, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700654
Johan Hedberg561aafb2012-01-04 13:31:59 +0200655 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700656 if (!bacmp(&e->data.bdaddr, bdaddr))
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200657 return e;
658 }
659
660 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700661}
662
Johan Hedberg561aafb2012-01-04 13:31:59 +0200663struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300664 bdaddr_t *bdaddr)
Johan Hedberg561aafb2012-01-04 13:31:59 +0200665{
Johan Hedberg30883512012-01-04 14:16:21 +0200666 struct discovery_state *cache = &hdev->discovery;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200667 struct inquiry_entry *e;
668
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300669 BT_DBG("cache %p, %pMR", cache, bdaddr);
Johan Hedberg561aafb2012-01-04 13:31:59 +0200670
671 list_for_each_entry(e, &cache->unknown, list) {
672 if (!bacmp(&e->data.bdaddr, bdaddr))
673 return e;
674 }
675
676 return NULL;
677}
678
/* Find an entry on the name-resolve list. With @bdaddr == BDADDR_ANY,
 * return the first entry whose name_state matches @state; otherwise
 * match on the address itself. Returns NULL when nothing matches.
 */
struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}
697
/* Re-sort @ie within the resolve list after its RSSI changed: entries
 * are kept ordered by descending signal strength (smaller |rssi|
 * first), with NAME_PENDING entries allowed to stay ahead.
 */
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	/* Unlink, find the new insertion point, relink. */
	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}
716
Johan Hedberg31754052012-01-04 13:39:52 +0200717bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
Gustavo F. Padovan04124682012-03-08 01:25:00 -0300718 bool name_known, bool *ssp)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700719{
Johan Hedberg30883512012-01-04 14:16:21 +0200720 struct discovery_state *cache = &hdev->discovery;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200721 struct inquiry_entry *ie;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700722
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +0300723 BT_DBG("cache %p, %pMR", cache, &data->bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700724
Szymon Janc2b2fec42012-11-20 11:38:54 +0100725 hci_remove_remote_oob_data(hdev, &data->bdaddr);
726
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200727 if (ssp)
728 *ssp = data->ssp_mode;
729
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200730 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
Johan Hedberga3d4e202012-01-09 00:53:02 +0200731 if (ie) {
Johan Hedberg388fc8f2012-02-23 00:38:59 +0200732 if (ie->data.ssp_mode && ssp)
733 *ssp = true;
734
Johan Hedberga3d4e202012-01-09 00:53:02 +0200735 if (ie->name_state == NAME_NEEDED &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300736 data->rssi != ie->data.rssi) {
Johan Hedberga3d4e202012-01-09 00:53:02 +0200737 ie->data.rssi = data->rssi;
738 hci_inquiry_cache_update_resolve(hdev, ie);
739 }
740
Johan Hedberg561aafb2012-01-04 13:31:59 +0200741 goto update;
Johan Hedberga3d4e202012-01-09 00:53:02 +0200742 }
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200743
Johan Hedberg561aafb2012-01-04 13:31:59 +0200744 /* Entry not in the cache. Add new one. */
745 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
746 if (!ie)
Johan Hedberg31754052012-01-04 13:39:52 +0200747 return false;
Johan Hedberg561aafb2012-01-04 13:31:59 +0200748
749 list_add(&ie->all, &cache->all);
750
751 if (name_known) {
752 ie->name_state = NAME_KNOWN;
753 } else {
754 ie->name_state = NAME_NOT_KNOWN;
755 list_add(&ie->list, &cache->unknown);
756 }
757
758update:
759 if (name_known && ie->name_state != NAME_KNOWN &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300760 ie->name_state != NAME_PENDING) {
Johan Hedberg561aafb2012-01-04 13:31:59 +0200761 ie->name_state = NAME_KNOWN;
762 list_del(&ie->list);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700763 }
764
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200765 memcpy(&ie->data, data, sizeof(*data));
766 ie->timestamp = jiffies;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700767 cache->timestamp = jiffies;
Johan Hedberg31754052012-01-04 13:39:52 +0200768
769 if (ie->name_state == NAME_NOT_KNOWN)
770 return false;
771
772 return true;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700773}
774
775static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
776{
Johan Hedberg30883512012-01-04 14:16:21 +0200777 struct discovery_state *cache = &hdev->discovery;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700778 struct inquiry_info *info = (struct inquiry_info *) buf;
779 struct inquiry_entry *e;
780 int copied = 0;
781
Johan Hedberg561aafb2012-01-04 13:31:59 +0200782 list_for_each_entry(e, &cache->all, all) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700783 struct inquiry_data *data = &e->data;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200784
785 if (copied >= num)
786 break;
787
Linus Torvalds1da177e2005-04-16 15:20:36 -0700788 bacpy(&info->bdaddr, &data->bdaddr);
789 info->pscan_rep_mode = data->pscan_rep_mode;
790 info->pscan_period_mode = data->pscan_period_mode;
791 info->pscan_mode = data->pscan_mode;
792 memcpy(info->dev_class, data->dev_class, 3);
793 info->clock_offset = data->clock_offset;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200794
Linus Torvalds1da177e2005-04-16 15:20:36 -0700795 info++;
Johan Hedbergb57c1a52012-01-03 16:03:00 +0200796 copied++;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700797 }
798
799 BT_DBG("cache %p, copied %d", cache, copied);
800 return copied;
801}
802
/* Request builder: queue an HCI Inquiry command with the LAP, length
 * and response count taken from the hci_inquiry_req passed via @opt.
 * Does nothing if an inquiry is already in progress.
 */
static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	/* Another inquiry is running; don't queue a second one */
	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
820
/* wait_on_bit() action: sleep until woken, then report whether a signal
 * is pending so the bit-wait can be aborted with -EINTR.
 */
static int wait_inquiry(void *word)
{
	schedule();
	return signal_pending(current);
}
826
Linus Torvalds1da177e2005-04-16 15:20:36 -0700827int hci_inquiry(void __user *arg)
828{
829 __u8 __user *ptr = arg;
830 struct hci_inquiry_req ir;
831 struct hci_dev *hdev;
832 int err = 0, do_inquiry = 0, max_rsp;
833 long timeo;
834 __u8 *buf;
835
836 if (copy_from_user(&ir, ptr, sizeof(ir)))
837 return -EFAULT;
838
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +0200839 hdev = hci_dev_get(ir.dev_id);
840 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700841 return -ENODEV;
842
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300843 hci_dev_lock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900844 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300845 inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700846 inquiry_cache_flush(hdev);
847 do_inquiry = 1;
848 }
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300849 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700850
Marcel Holtmann04837f62006-07-03 10:02:33 +0200851 timeo = ir.length * msecs_to_jiffies(2000);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200852
853 if (do_inquiry) {
Johan Hedberg01178cd2013-03-05 20:37:41 +0200854 err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
855 timeo);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200856 if (err < 0)
857 goto done;
Andre Guedes3e13fa12013-03-27 20:04:56 -0300858
859 /* Wait until Inquiry procedure finishes (HCI_INQUIRY flag is
860 * cleared). If it is interrupted by a signal, return -EINTR.
861 */
862 if (wait_on_bit(&hdev->flags, HCI_INQUIRY, wait_inquiry,
863 TASK_INTERRUPTIBLE))
864 return -EINTR;
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200865 }
Linus Torvalds1da177e2005-04-16 15:20:36 -0700866
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -0300867 /* for unlimited number of responses we will use buffer with
868 * 255 entries
869 */
Linus Torvalds1da177e2005-04-16 15:20:36 -0700870 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
871
872 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
873 * copy it to the user space.
874 */
Szymon Janc01df8c32011-02-17 16:46:47 +0100875 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
Andrei Emeltchenko70f230202010-12-01 16:58:25 +0200876 if (!buf) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700877 err = -ENOMEM;
878 goto done;
879 }
880
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300881 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700882 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -0300883 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700884
885 BT_DBG("num_rsp %d", ir.num_rsp);
886
887 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
888 ptr += sizeof(ir);
889 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
Gustavo Padovana8c5fb12012-05-17 00:36:26 -0300890 ir.num_rsp))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700891 err = -EFAULT;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900892 } else
Linus Torvalds1da177e2005-04-16 15:20:36 -0700893 err = -EFAULT;
894
895 kfree(buf);
896
897done:
898 hci_dev_put(hdev);
899 return err;
900}
901
Johan Hedberg3f0f5242012-11-08 01:23:00 +0100902static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
903{
904 u8 ad_len = 0, flags = 0;
905 size_t name_len;
906
907 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
908 flags |= LE_AD_GENERAL;
909
910 if (!lmp_bredr_capable(hdev))
911 flags |= LE_AD_NO_BREDR;
912
913 if (lmp_le_br_capable(hdev))
914 flags |= LE_AD_SIM_LE_BREDR_CTRL;
915
916 if (lmp_host_le_br_capable(hdev))
917 flags |= LE_AD_SIM_LE_BREDR_HOST;
918
919 if (flags) {
920 BT_DBG("adv flags 0x%02x", flags);
921
922 ptr[0] = 2;
923 ptr[1] = EIR_FLAGS;
924 ptr[2] = flags;
925
926 ad_len += 3;
927 ptr += 3;
928 }
929
930 if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
931 ptr[0] = 2;
932 ptr[1] = EIR_TX_POWER;
933 ptr[2] = (u8) hdev->adv_tx_power;
934
935 ad_len += 3;
936 ptr += 3;
937 }
938
939 name_len = strlen(hdev->dev_name);
940 if (name_len > 0) {
941 size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;
942
943 if (name_len > max_len) {
944 name_len = max_len;
945 ptr[1] = EIR_NAME_SHORT;
946 } else
947 ptr[1] = EIR_NAME_COMPLETE;
948
949 ptr[0] = name_len + 1;
950
951 memcpy(ptr + 2, hdev->dev_name, name_len);
952
953 ad_len += (name_len + 2);
954 ptr += (name_len + 2);
955 }
956
957 return ad_len;
958}
959
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500960void hci_update_ad(struct hci_request *req)
Johan Hedberg3f0f5242012-11-08 01:23:00 +0100961{
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500962 struct hci_dev *hdev = req->hdev;
Johan Hedberg3f0f5242012-11-08 01:23:00 +0100963 struct hci_cp_le_set_adv_data cp;
964 u8 len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +0100965
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500966 if (!lmp_le_capable(hdev))
967 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +0100968
969 memset(&cp, 0, sizeof(cp));
970
971 len = create_ad(hdev, cp.data);
972
973 if (hdev->adv_data_len == len &&
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500974 memcmp(cp.data, hdev->adv_data, len) == 0)
975 return;
Johan Hedberg3f0f5242012-11-08 01:23:00 +0100976
977 memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
978 hdev->adv_data_len = len;
979
980 cp.length = len;
Johan Hedberg3f0f5242012-11-08 01:23:00 +0100981
Johan Hedberg04b4edc2013-03-15 17:07:01 -0500982 hci_req_add(req, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);
Johan Hedberg3f0f5242012-11-08 01:23:00 +0100983}
984
Linus Torvalds1da177e2005-04-16 15:20:36 -0700985/* ---- HCI ioctl helpers ---- */
986
/* Power on and initialize the HCI device with the given id.
 *
 * Takes a reference with hci_dev_get() for the duration of the call,
 * serializes against other requests via hci_req_lock(), runs the HCI
 * init sequence (unless the device is raw) and, on success, marks the
 * device up and notifies mgmt. On init failure all work is flushed and
 * the transport is closed again.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init error).
 */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Device is being unregistered; refuse to open */
	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	/* Radio is soft/hard blocked via rfkill */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Open the transport (driver callback) */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	/* Run the HCI initialization sequence unless raw */
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Tell mgmt we're powered, unless still in setup */
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
1071
/* Take the device down: cancel pending work, flush queues, optionally
 * reset the controller, close the transport and clear volatile state.
 * The teardown order matters — works are flushed before queues are
 * purged, and the controller reset runs before the final cmd_work
 * flush. Always returns 0.
 */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail out */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	kfree_skb(hdev->recv_evt);
	hdev->recv_evt = NULL;

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	/* Clear flags */
	hdev->flags = 0;
	hdev->dev_flags &= ~HCI_PERSISTENT_MASK;

	/* Notify mgmt unless this close came from an auto-off */
	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}
1168
1169int hci_dev_close(__u16 dev)
1170{
1171 struct hci_dev *hdev;
1172 int err;
1173
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001174 hdev = hci_dev_get(dev);
1175 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 return -ENODEV;
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001177
1178 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1179 cancel_delayed_work(&hdev->power_off);
1180
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 err = hci_dev_do_close(hdev);
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001182
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183 hci_dev_put(hdev);
1184 return err;
1185}
1186
1187int hci_dev_reset(__u16 dev)
1188{
1189 struct hci_dev *hdev;
1190 int ret = 0;
1191
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001192 hdev = hci_dev_get(dev);
1193 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001194 return -ENODEV;
1195
1196 hci_req_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001197
1198 if (!test_bit(HCI_UP, &hdev->flags))
1199 goto done;
1200
1201 /* Drop queues */
1202 skb_queue_purge(&hdev->rx_q);
1203 skb_queue_purge(&hdev->cmd_q);
1204
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001205 hci_dev_lock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206 inquiry_cache_flush(hdev);
1207 hci_conn_hash_flush(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001208 hci_dev_unlock(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001209
1210 if (hdev->flush)
1211 hdev->flush(hdev);
1212
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001213 atomic_set(&hdev->cmd_cnt, 1);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03001214 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001215
1216 if (!test_bit(HCI_RAW, &hdev->flags))
Johan Hedberg01178cd2013-03-05 20:37:41 +02001217 ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001218
1219done:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001220 hci_req_unlock(hdev);
1221 hci_dev_put(hdev);
1222 return ret;
1223}
1224
1225int hci_dev_reset_stat(__u16 dev)
1226{
1227 struct hci_dev *hdev;
1228 int ret = 0;
1229
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001230 hdev = hci_dev_get(dev);
1231 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001232 return -ENODEV;
1233
1234 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1235
1236 hci_dev_put(hdev);
1237
1238 return ret;
1239}
1240
/* Handle the device-configuration ioctls (HCISETAUTH, HCISETSCAN, ...).
 * Copies a struct hci_dev_req from userspace and applies the requested
 * setting — either by running a synchronous HCI request or by updating
 * locally cached parameters. Returns 0 or a negative errno.
 */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		/* Only master/accept bits are honoured */
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt carries MTU and packet count packed in its two
		 * __u16 halves (legacy ioctl ABI — keep the casts as-is).
		 */
		hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
1315
1316int hci_get_dev_list(void __user *arg)
1317{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001318 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 struct hci_dev_list_req *dl;
1320 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001321 int n = 0, size, err;
1322 __u16 dev_num;
1323
1324 if (get_user(dev_num, (__u16 __user *) arg))
1325 return -EFAULT;
1326
1327 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1328 return -EINVAL;
1329
1330 size = sizeof(*dl) + dev_num * sizeof(*dr);
1331
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001332 dl = kzalloc(size, GFP_KERNEL);
1333 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001334 return -ENOMEM;
1335
1336 dr = dl->dev_req;
1337
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001338 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001339 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001340 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001341 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001342
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001343 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1344 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001345
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 (dr + n)->dev_id = hdev->id;
1347 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001348
Linus Torvalds1da177e2005-04-16 15:20:36 -07001349 if (++n >= dev_num)
1350 break;
1351 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001352 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001353
1354 dl->dev_num = n;
1355 size = sizeof(*dl) + n * sizeof(*dr);
1356
1357 err = copy_to_user(arg, dl, size);
1358 kfree(dl);
1359
1360 return err ? -EFAULT : 0;
1361}
1362
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for the
 * requested device and copy it back to userspace.
 */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Userspace querying the device cancels any pending auto-off */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Legacy (non-mgmt) interface implies pairable */
	if (!test_bit(HCI_MGMT, &hdev->dev_flags))
		set_bit(HCI_PAIRABLE, &hdev->dev_flags);

	strcpy(di.name, hdev->name);
	di.bdaddr = hdev->bdaddr;
	di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	if (lmp_bredr_capable(hdev)) {
		di.acl_mtu = hdev->acl_mtu;
		di.acl_pkts = hdev->acl_pkts;
		di.sco_mtu = hdev->sco_mtu;
		di.sco_pkts = hdev->sco_pkts;
	} else {
		/* LE-only controller: report LE buffers in the ACL fields */
		di.acl_mtu = hdev->le_mtu;
		di.acl_pkts = hdev->le_pkts;
		di.sco_mtu = 0;
		di.sco_pkts = 0;
	}
	di.link_policy = hdev->link_policy;
	di.link_mode = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
1411
1412/* ---- Interface to HCI drivers ---- */
1413
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001414static int hci_rfkill_set_block(void *data, bool blocked)
1415{
1416 struct hci_dev *hdev = data;
1417
1418 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1419
1420 if (!blocked)
1421 return 0;
1422
1423 hci_dev_do_close(hdev);
1424
1425 return 0;
1426}
1427
/* rfkill callbacks: only block events are handled, see
 * hci_rfkill_set_block().
 */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1431
/* Deferred power-on work: open the device, schedule an automatic
 * power-off for auto-powered devices and notify mgmt when the initial
 * setup phase completes.
 */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	/* Auto-powered devices are shut down again after a timeout */
	if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
				   HCI_AUTO_OFF_TIMEOUT);

	if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
		mgmt_index_added(hdev);
}
1448
/* Deferred power-off work (scheduled e.g. by the auto-off timeout):
 * simply takes the device down.
 */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
					    power_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_do_close(hdev);
}
1458
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001459static void hci_discov_off(struct work_struct *work)
1460{
1461 struct hci_dev *hdev;
1462 u8 scan = SCAN_PAGE;
1463
1464 hdev = container_of(work, struct hci_dev, discov_off.work);
1465
1466 BT_DBG("%s", hdev->name);
1467
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001468 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001469
1470 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1471
1472 hdev->discov_timeout = 0;
1473
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001474 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001475}
1476
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001477int hci_uuids_clear(struct hci_dev *hdev)
1478{
Johan Hedberg48210022013-01-27 00:31:28 +02001479 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001480
Johan Hedberg48210022013-01-27 00:31:28 +02001481 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1482 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001483 kfree(uuid);
1484 }
1485
1486 return 0;
1487}
1488
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001489int hci_link_keys_clear(struct hci_dev *hdev)
1490{
1491 struct list_head *p, *n;
1492
1493 list_for_each_safe(p, n, &hdev->link_keys) {
1494 struct link_key *key;
1495
1496 key = list_entry(p, struct link_key, list);
1497
1498 list_del(p);
1499 kfree(key);
1500 }
1501
1502 return 0;
1503}
1504
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001505int hci_smp_ltks_clear(struct hci_dev *hdev)
1506{
1507 struct smp_ltk *k, *tmp;
1508
1509 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1510 list_del(&k->list);
1511 kfree(k);
1512 }
1513
1514 return 0;
1515}
1516
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001517struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1518{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001519 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001520
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001521 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001522 if (bacmp(bdaddr, &k->bdaddr) == 0)
1523 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001524
1525 return NULL;
1526}
1527
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301528static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001529 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001530{
1531 /* Legacy key */
1532 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301533 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001534
1535 /* Debug keys are insecure so don't store them persistently */
1536 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301537 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001538
1539 /* Changed combination key and there's no previous one */
1540 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301541 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001542
1543 /* Security mode 3 case */
1544 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301545 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001546
1547 /* Neither local nor remote side had no-bonding as requirement */
1548 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301549 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001550
1551 /* Local side had dedicated bonding as requirement */
1552 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301553 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001554
1555 /* Remote side had dedicated bonding as requirement */
1556 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301557 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001558
1559 /* If none of the above criteria match, then don't store the key
1560 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301561 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001562}
1563
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001564struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001565{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001566 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001567
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001568 list_for_each_entry(k, &hdev->long_term_keys, list) {
1569 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001570 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001571 continue;
1572
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001573 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001574 }
1575
1576 return NULL;
1577}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001578
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001579struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001580 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001581{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001582 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001583
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001584 list_for_each_entry(k, &hdev->long_term_keys, list)
1585 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001586 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001587 return k;
1588
1589 return NULL;
1590}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001591
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001592int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001593 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001594{
1595 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301596 u8 old_key_type;
1597 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001598
1599 old_key = hci_find_link_key(hdev, bdaddr);
1600 if (old_key) {
1601 old_key_type = old_key->type;
1602 key = old_key;
1603 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001604 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001605 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1606 if (!key)
1607 return -ENOMEM;
1608 list_add(&key->list, &hdev->link_keys);
1609 }
1610
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001611 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001612
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001613 /* Some buggy controller combinations generate a changed
1614 * combination key for legacy pairing even when there's no
1615 * previous key */
1616 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001617 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001618 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001619 if (conn)
1620 conn->key_type = type;
1621 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001622
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001623 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001624 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001625 key->pin_len = pin_len;
1626
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001627 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001628 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001629 else
1630 key->type = type;
1631
Johan Hedberg4df378a2011-04-28 11:29:03 -07001632 if (!new_key)
1633 return 0;
1634
1635 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1636
Johan Hedberg744cf192011-11-08 20:40:14 +02001637 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001638
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301639 if (conn)
1640 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001641
1642 return 0;
1643}
1644
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001645int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001646 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001647 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001648{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001649 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001650
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001651 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1652 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001653
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001654 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1655 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001656 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001657 else {
1658 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001659 if (!key)
1660 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001661 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001662 }
1663
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001664 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001665 key->bdaddr_type = addr_type;
1666 memcpy(key->val, tk, sizeof(key->val));
1667 key->authenticated = authenticated;
1668 key->ediv = ediv;
1669 key->enc_size = enc_size;
1670 key->type = type;
1671 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001672
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001673 if (!new_key)
1674 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001675
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001676 if (type & HCI_SMP_LTK)
1677 mgmt_new_ltk(hdev, key, 1);
1678
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001679 return 0;
1680}
1681
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001682int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1683{
1684 struct link_key *key;
1685
1686 key = hci_find_link_key(hdev, bdaddr);
1687 if (!key)
1688 return -ENOENT;
1689
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001690 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001691
1692 list_del(&key->list);
1693 kfree(key);
1694
1695 return 0;
1696}
1697
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001698int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1699{
1700 struct smp_ltk *k, *tmp;
1701
1702 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1703 if (bacmp(bdaddr, &k->bdaddr))
1704 continue;
1705
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001706 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001707
1708 list_del(&k->list);
1709 kfree(k);
1710 }
1711
1712 return 0;
1713}
1714
Ville Tervo6bd32322011-02-16 16:32:41 +02001715/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001716static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001717{
1718 struct hci_dev *hdev = (void *) arg;
1719
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001720 if (hdev->sent_cmd) {
1721 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1722 u16 opcode = __le16_to_cpu(sent->opcode);
1723
1724 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1725 } else {
1726 BT_ERR("%s command tx timeout", hdev->name);
1727 }
1728
Ville Tervo6bd32322011-02-16 16:32:41 +02001729 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001730 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001731}
1732
Szymon Janc2763eda2011-03-22 13:12:22 +01001733struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001734 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001735{
1736 struct oob_data *data;
1737
1738 list_for_each_entry(data, &hdev->remote_oob_data, list)
1739 if (bacmp(bdaddr, &data->bdaddr) == 0)
1740 return data;
1741
1742 return NULL;
1743}
1744
1745int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1746{
1747 struct oob_data *data;
1748
1749 data = hci_find_remote_oob_data(hdev, bdaddr);
1750 if (!data)
1751 return -ENOENT;
1752
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001753 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001754
1755 list_del(&data->list);
1756 kfree(data);
1757
1758 return 0;
1759}
1760
1761int hci_remote_oob_data_clear(struct hci_dev *hdev)
1762{
1763 struct oob_data *data, *n;
1764
1765 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1766 list_del(&data->list);
1767 kfree(data);
1768 }
1769
1770 return 0;
1771}
1772
1773int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001774 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001775{
1776 struct oob_data *data;
1777
1778 data = hci_find_remote_oob_data(hdev, bdaddr);
1779
1780 if (!data) {
1781 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1782 if (!data)
1783 return -ENOMEM;
1784
1785 bacpy(&data->bdaddr, bdaddr);
1786 list_add(&data->list, &hdev->remote_oob_data);
1787 }
1788
1789 memcpy(data->hash, hash, sizeof(data->hash));
1790 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1791
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001792 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001793
1794 return 0;
1795}
1796
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001797struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001798{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001799 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001800
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001801 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001802 if (bacmp(bdaddr, &b->bdaddr) == 0)
1803 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001804
1805 return NULL;
1806}
1807
1808int hci_blacklist_clear(struct hci_dev *hdev)
1809{
1810 struct list_head *p, *n;
1811
1812 list_for_each_safe(p, n, &hdev->blacklist) {
1813 struct bdaddr_list *b;
1814
1815 b = list_entry(p, struct bdaddr_list, list);
1816
1817 list_del(p);
1818 kfree(b);
1819 }
1820
1821 return 0;
1822}
1823
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001824int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001825{
1826 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001827
1828 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1829 return -EBADF;
1830
Antti Julku5e762442011-08-25 16:48:02 +03001831 if (hci_blacklist_lookup(hdev, bdaddr))
1832 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001833
1834 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001835 if (!entry)
1836 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001837
1838 bacpy(&entry->bdaddr, bdaddr);
1839
1840 list_add(&entry->list, &hdev->blacklist);
1841
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001842 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001843}
1844
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001845int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001846{
1847 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001848
Szymon Janc1ec918c2011-11-16 09:32:21 +01001849 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001850 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001851
1852 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001853 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001854 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001855
1856 list_del(&entry->list);
1857 kfree(entry);
1858
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001859 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001860}
1861
Johan Hedberg42c6b122013-03-05 20:37:49 +02001862static void le_scan_param_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001863{
1864 struct le_scan_params *param = (struct le_scan_params *) opt;
1865 struct hci_cp_le_set_scan_param cp;
1866
1867 memset(&cp, 0, sizeof(cp));
1868 cp.type = param->type;
1869 cp.interval = cpu_to_le16(param->interval);
1870 cp.window = cpu_to_le16(param->window);
1871
Johan Hedberg42c6b122013-03-05 20:37:49 +02001872 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001873}
1874
Johan Hedberg42c6b122013-03-05 20:37:49 +02001875static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001876{
1877 struct hci_cp_le_set_scan_enable cp;
1878
1879 memset(&cp, 0, sizeof(cp));
1880 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001881 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001882
Johan Hedberg42c6b122013-03-05 20:37:49 +02001883 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001884}
1885
/* Synchronously start an LE scan with the given parameters and arm the
 * delayed work that will stop it after @timeout ms.
 *
 * Returns -EINPROGRESS if a scan is already running, a negative error
 * from the synchronous HCI requests, or 0 on success.
 */
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
			  u16 window, int timeout)
{
	long timeo = msecs_to_jiffies(3000);	/* per-request timeout */
	struct le_scan_params param;
	int err;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
		return -EINPROGRESS;

	param.type = type;
	param.interval = interval;
	param.window = window;

	hci_req_lock(hdev);

	/* Set scan parameters first, then enable scanning; the second
	 * request is only issued when the first succeeded */
	err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
			     timeo);
	if (!err)
		err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);

	hci_req_unlock(hdev);

	if (err < 0)
		return err;

	/* Schedule the automatic scan stop */
	queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
			   msecs_to_jiffies(timeout));

	return 0;
}
1919
Andre Guedes7dbfac12012-03-15 16:52:07 -03001920int hci_cancel_le_scan(struct hci_dev *hdev)
1921{
1922 BT_DBG("%s", hdev->name);
1923
1924 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1925 return -EALREADY;
1926
1927 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1928 struct hci_cp_le_set_scan_enable cp;
1929
1930 /* Send HCI command to disable LE Scan */
1931 memset(&cp, 0, sizeof(cp));
1932 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1933 }
1934
1935 return 0;
1936}
1937
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001938static void le_scan_disable_work(struct work_struct *work)
1939{
1940 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001941 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001942 struct hci_cp_le_set_scan_enable cp;
1943
1944 BT_DBG("%s", hdev->name);
1945
1946 memset(&cp, 0, sizeof(cp));
1947
1948 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1949}
1950
Andre Guedes28b75a82012-02-03 17:48:00 -03001951static void le_scan_work(struct work_struct *work)
1952{
1953 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1954 struct le_scan_params *param = &hdev->le_scan_params;
1955
1956 BT_DBG("%s", hdev->name);
1957
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001958 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1959 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001960}
1961
1962int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001963 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001964{
1965 struct le_scan_params *param = &hdev->le_scan_params;
1966
1967 BT_DBG("%s", hdev->name);
1968
Johan Hedbergf1550472012-10-24 21:12:03 +03001969 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1970 return -ENOTSUPP;
1971
Andre Guedes28b75a82012-02-03 17:48:00 -03001972 if (work_busy(&hdev->le_scan))
1973 return -EINPROGRESS;
1974
1975 param->type = type;
1976 param->interval = interval;
1977 param->window = window;
1978 param->timeout = timeout;
1979
1980 queue_work(system_long_wq, &hdev->le_scan);
1981
1982 return 0;
1983}
1984
/* Alloc HCI device */
/* Allocate and initialize a new hci_dev: default packet types and
 * radio parameters, all internal lists, work items, skb queues, the
 * command timer, sysfs hooks and discovery state.  Returns NULL on
 * allocation failure.  Counterpart of hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	/* Baseline controller defaults until the real capabilities are
	 * read from the hardware */
	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */
	hdev->inq_tx_power = HCI_TX_POWER_INVALID;
	hdev->adv_tx_power = HCI_TX_POWER_INVALID;

	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	mutex_init(&hdev->lock);
	mutex_init(&hdev->req_lock);

	INIT_LIST_HEAD(&hdev->mgmt_pending);
	INIT_LIST_HEAD(&hdev->blacklist);
	INIT_LIST_HEAD(&hdev->uuids);
	INIT_LIST_HEAD(&hdev->link_keys);
	INIT_LIST_HEAD(&hdev->long_term_keys);
	INIT_LIST_HEAD(&hdev->remote_oob_data);
	INIT_LIST_HEAD(&hdev->conn_hash.list);

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->le_scan, le_scan_work);

	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
	INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

	skb_queue_head_init(&hdev->driver_init);
	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	init_waitqueue_head(&hdev->req_wait_q);

	/* Arm the command timeout handler (see hci_cmd_timeout()) */
	setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);

	hci_init_sysfs(hdev);
	discovery_init(hdev);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
2040
/* Free HCI device */
/* Release a device obtained from hci_alloc_dev().  Drops the last
 * device reference; the actual memory is freed by the device core's
 * release callback. */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
2050
/* Register HCI device */
/* Register a fully initialized hci_dev with the HCI core: assign an
 * index via the IDA, link it into hci_dev_list, create its work
 * queues, sysfs entries and rfkill switch, then schedule power-on.
 * Returns the assigned index (>= 0) or a negative errno; on failure
 * all partially acquired resources are released. */
int hci_register_dev(struct hci_dev *hdev)
{
	int id, error;

	/* A driver must at least provide open/close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	switch (hdev->dev_type) {
	case HCI_BREDR:
		id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
		break;
	case HCI_AMP:
		id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
		break;
	default:
		return -EINVAL;
	}

	if (id < 0)
		return id;

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_add(&hdev->list, &hci_dev_list);
	write_unlock(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
					  WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	hdev->req_workqueue = alloc_workqueue(hdev->name,
					      WQ_HIGHPRI | WQ_UNBOUND |
					      WQ_MEM_RECLAIM, 1);
	if (!hdev->req_workqueue) {
		destroy_workqueue(hdev->workqueue);
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is non-fatal: the device simply
	 * has no rfkill switch */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				    RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
				    hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_SETUP, &hdev->dev_flags);

	if (hdev->dev_type != HCI_AMP)
		set_bit(HCI_AUTO_OFF, &hdev->dev_flags);

	hci_notify(hdev, HCI_DEV_REG);
	hci_dev_hold(hdev);

	queue_work(hdev->req_workqueue, &hdev->power_on);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);
err:
	/* Undo index allocation and list linkage */
	ida_simple_remove(&hci_index_ida, hdev->id);
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
2139
/* Unregister HCI device */
/* Tear down a registered hci_dev: unlink it, close it, flush pending
 * work, notify mgmt and the HCI notifier chain, remove rfkill/sysfs,
 * clear all stored keys and lists, drop the registration reference
 * and finally release the index. */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i, id;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	set_bit(HCI_UNREGISTER, &hdev->dev_flags);

	/* Remember the index; hdev may be freed by hci_dev_put() below */
	id = hdev->id;

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	cancel_work_sync(&hdev->power_on);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
	    !test_bit(HCI_SETUP, &hdev->dev_flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	destroy_workqueue(hdev->workqueue);
	destroy_workqueue(hdev->req_workqueue);

	/* Free all persistent per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_smp_ltks_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock(hdev);

	hci_dev_put(hdev);

	ida_simple_remove(&hci_index_ida, id);
}
EXPORT_SYMBOL(hci_unregister_dev);
2198
/* Suspend HCI device */
/* Notify interested parties that the device is suspending; no other
 * state is changed here.  Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
2206
/* Resume HCI device */
/* Notify interested parties that the device has resumed; no other
 * state is changed here.  Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
2214
Marcel Holtmann76bca882009-11-18 00:40:39 +01002215/* Receive frame from HCI drivers */
2216int hci_recv_frame(struct sk_buff *skb)
2217{
2218 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2219 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002220 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002221 kfree_skb(skb);
2222 return -ENXIO;
2223 }
2224
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002225 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002226 bt_cb(skb)->incoming = 1;
2227
2228 /* Time stamp */
2229 __net_timestamp(skb);
2230
Marcel Holtmann76bca882009-11-18 00:40:39 +01002231 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002232 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002233
Marcel Holtmann76bca882009-11-18 00:40:39 +01002234 return 0;
2235}
2236EXPORT_SYMBOL(hci_recv_frame);
2237
/* Reassemble a full HCI packet of the given @type from a driver byte
 * stream, using hdev->reassembly[@index] as per-slot state.
 *
 * Consumes up to @count bytes from @data.  When a frame completes it
 * is passed to hci_recv_frame() and the number of unconsumed bytes is
 * returned (so the caller can feed the remainder back in).  Returns
 * -EILSEQ for an invalid type/index, -ENOMEM when allocation fails or
 * the advertised payload exceeds the buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
			  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
	    index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer sized for
		 * the largest frame of this type and expect the header
		 * first */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed
		 * to complete the current header or payload */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min_t(uint, scb->expect, count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and sanity-check it against the buffer */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
2346
Marcel Holtmannef222012007-07-11 06:42:04 +02002347int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2348{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302349 int rem = 0;
2350
Marcel Holtmannef222012007-07-11 06:42:04 +02002351 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2352 return -EILSEQ;
2353
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002354 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002355 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302356 if (rem < 0)
2357 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002358
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302359 data += (count - rem);
2360 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002361 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002362
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302363 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002364}
2365EXPORT_SYMBOL(hci_recv_fragment);
2366
#define STREAM_REASSEMBLY 0

/* Feed a raw HCI byte stream (e.g. from a UART transport) into the
 * reassembler.  The first byte of each frame is the packet type
 * indicator; the body is reassembled via hci_reassembly() using the
 * dedicated stream slot.  Returns the number of bytes left unconsumed,
 * or a negative error code propagated from hci_reassembly().
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame: consume the packet type
			 * indicator byte before reassembling the body. */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			/* Continuation of a partial frame: reuse its type */
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
				     STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Advance past the consumed bytes and loop for the rest */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
2401
Linus Torvalds1da177e2005-04-16 15:20:36 -07002402/* ---- Interface to upper protocols ---- */
2403
/* Register an upper-protocol callback block on the global HCI callback
 * list, under the list's writer lock.  Always returns 0.
 */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
2415
/* Remove a previously registered callback block from the global HCI
 * callback list, under the list's writer lock.  Always returns 0.
 */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
2427
/* Hand one skb to the HCI transport driver.  The owning device is
 * carried in skb->dev; if it is missing the skb is freed and -ENODEV
 * returned.  Before the driver sees it, the frame is time-stamped and
 * a copy is mirrored to the monitor channel and, in promiscuous mode,
 * to the HCI sockets.  Returns the driver's send() result.
 */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	/* Time stamp */
	__net_timestamp(skb);

	/* Send copy to monitor */
	hci_send_to_monitor(hdev, skb);

	if (atomic_read(&hdev->promisc)) {
		/* Send copy to the sockets */
		hci_send_to_sock(hdev, skb);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
2455
Johan Hedberg3119ae92013-03-05 20:37:44 +02002456void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2457{
2458 skb_queue_head_init(&req->cmd_q);
2459 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002460 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002461}
2462
/* Submit a built HCI request: splice its queued commands onto the
 * device's command queue and kick the command worker.  The completion
 * callback is attached to the last command of the request.  Returns 0
 * on success, the request's recorded build error, or -ENODATA for an
 * empty request.
 */
int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("length %u", skb_queue_len(&req->cmd_q));

	/* If an error occurred during request building, remove all HCI
	 * commands queued on the HCI request queue.
	 */
	if (req->err) {
		skb_queue_purge(&req->cmd_q);
		return req->err;
	}

	/* Do not allow empty requests */
	if (skb_queue_empty(&req->cmd_q))
		return -ENODATA;

	/* Completion is signalled on the last command of the request */
	skb = skb_peek_tail(&req->cmd_q);
	bt_cb(skb)->req.complete = complete;

	/* Atomically move the whole request onto the device queue */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2494
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002495static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2496 u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002497{
2498 int len = HCI_COMMAND_HDR_SIZE + plen;
2499 struct hci_command_hdr *hdr;
2500 struct sk_buff *skb;
2501
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002503 if (!skb)
2504 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002505
2506 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002507 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002508 hdr->plen = plen;
2509
2510 if (plen)
2511 memcpy(skb_put(skb, plen), param, plen);
2512
2513 BT_DBG("skb len %d", skb->len);
2514
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002515 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002516 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002517
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002518 return skb;
2519}
2520
2521/* Send HCI command */
2522int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2523{
2524 struct sk_buff *skb;
2525
2526 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2527
2528 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2529 if (!skb) {
2530 BT_ERR("%s no memory for command", hdev->name);
2531 return -ENOMEM;
2532 }
2533
Johan Hedberg11714b32013-03-05 20:37:47 +02002534 /* Stand-alone HCI commands must be flaged as
2535 * single-command requests.
2536 */
2537 bt_cb(skb)->req.start = true;
2538
Linus Torvalds1da177e2005-04-16 15:20:36 -07002539 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002540 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002541
2542 return 0;
2543}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002544
/* Queue a command to an asynchronous HCI request */
void hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
{
	struct hci_dev *hdev = req->hdev;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);

	/* If an error occurred during request building, there is no point in
	 * queueing the HCI command. We can simply return.
	 */
	if (req->err)
		return;

	skb = hci_prepare_cmd(hdev, opcode, plen, param);
	if (!skb) {
		/* Record the failure on the request; hci_req_run() will
		 * purge the queue and report it to the caller. */
		BT_ERR("%s no memory for command (opcode 0x%4.4x)",
		       hdev->name, opcode);
		req->err = -ENOMEM;
		return;
	}

	/* The first command of a request marks the request boundary */
	if (skb_queue_empty(&req->cmd_q))
		bt_cb(skb)->req.start = true;

	skb_queue_tail(&req->cmd_q, skb);
}
2572
Linus Torvalds1da177e2005-04-16 15:20:36 -07002573/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002574void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002575{
2576 struct hci_command_hdr *hdr;
2577
2578 if (!hdev->sent_cmd)
2579 return NULL;
2580
2581 hdr = (void *) hdev->sent_cmd->data;
2582
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002583 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584 return NULL;
2585
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002586 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002587
2588 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2589}
2590
2591/* Send ACL data */
2592static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2593{
2594 struct hci_acl_hdr *hdr;
2595 int len = skb->len;
2596
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002597 skb_push(skb, HCI_ACL_HDR_SIZE);
2598 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002599 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002600 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2601 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002602}
2603
/* Add ACL headers to @skb (and any frag_list fragments) and queue the
 * whole frame on @queue.  For a BR/EDR controller the connection handle
 * goes into the header; for an AMP controller the channel handle does.
 * Fragments are queued atomically under the queue lock with ACL_START
 * replaced by ACL_CONT.
 */
static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
			  struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	/* Head skb carries only its linear part; fragments follow in
	 * frag_list and are queued individually below. */
	skb->len = skb_headlen(skb);
	skb->data_len = 0;

	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;

	switch (hdev->dev_type) {
	case HCI_BREDR:
		hci_add_acl_hdr(skb, conn->handle, flags);
		break;
	case HCI_AMP:
		hci_add_acl_hdr(skb, chan->handle, flags);
		break;
	default:
		BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
		return;
	}

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			/* NOTE(review): continuation fragments always use
			 * conn->handle, even on AMP where the head used
			 * chan->handle - confirm this asymmetry is intended. */
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2662
2663void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2664{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002665 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002666
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002667 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002668
2669 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002670
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002671 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002673 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002674}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002675
2676/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002677void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678{
2679 struct hci_dev *hdev = conn->hdev;
2680 struct hci_sco_hdr hdr;
2681
2682 BT_DBG("%s len %d", hdev->name, skb->len);
2683
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002684 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685 hdr.dlen = skb->len;
2686
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002687 skb_push(skb, HCI_SCO_HDR_SIZE);
2688 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002689 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690
2691 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002692 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002693
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002695 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002696}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697
2698/* ---- HCI TX task (outgoing data) ---- */
2699
2700/* HCI Connection scheduler */
/* Connection-level scheduler: under RCU, pick the ready connection of
 * @type with the fewest in-flight packets ("sent") and compute a fair
 * per-round quote from the matching controller buffer count.  Returns
 * the chosen connection (or NULL) and writes the quote to *quote.
 */
static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
				     int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	unsigned int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip wrong type or nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-loaded candidate */
		if (c->sent < min) {
			min = c->sent;
			conn = c;
		}

		/* Stop early once all connections of this type are seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the free buffers, at least one */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2760
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002761static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762{
2763 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002764 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002765
Ville Tervobae1f5d92011-02-10 22:38:53 -03002766 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002767
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002768 rcu_read_lock();
2769
Linus Torvalds1da177e2005-04-16 15:20:36 -07002770 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002771 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002772 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002773 BT_ERR("%s killing stalled connection %pMR",
2774 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03002775 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002776 }
2777 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002778
2779 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002780}
2781
/* Channel-level scheduler: under RCU, scan all ready connections of
 * @type and pick the channel whose head skb has the highest priority,
 * breaking ties by the smallest per-connection "sent" count.  Computes
 * a fair quote from the matching controller buffer pool.  Returns the
 * chosen channel or NULL.
 */
static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
				      int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	unsigned int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the fairness
			 * bookkeeping at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Among equal priorities, least-loaded wins */
			if (conn->sent < min) {
				min = conn->sent;
				chan = tmp;
			}
		}

		/* Stop once every connection of this type was visited */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that matches the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case AMP_LINK:
		cnt = hdev->block_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the free buffers, at least one */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2863
/* Anti-starvation pass: for every ready connection of @type, reset the
 * per-round "sent" counter of channels that transmitted, and promote
 * the head skb of channels that did not get to send up to priority
 * HCI_PRIO_MAX - 1 so they win a later scheduling round.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* Channel got to send this round: just reset it */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			/* Starved channel: bump its head skb's priority */
			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
			       skb->priority);
		}

		/* Stop once every connection of this type was visited */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2913
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002914static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2915{
2916 /* Calculate count of blocks used by this packet */
2917 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2918}
2919
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002920static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002922 if (!test_bit(HCI_RAW, &hdev->flags)) {
2923 /* ACL tx timeout must be longer than maximum
2924 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002925 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002926 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002927 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002929}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002930
/* Packet-based ACL scheduler: while the controller has free ACL
 * buffers, drain up to "quote" packets from the channel picked by
 * hci_chan_sent(), stopping early if a lower-priority skb reaches the
 * head.  Runs a priority recalculation pass if anything was sent.
 */
static void hci_sched_acl_pkt(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->acl_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;

	__check_timeout(hdev, cnt);

	while (hdev->acl_cnt &&
	       (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head skb when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Something was sent: rebalance starved channels */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2968
/* Block-based ACL scheduler (data-block flow control): like
 * hci_sched_acl_pkt() but accounting in controller data blocks rather
 * than whole packets.  AMP controllers schedule AMP_LINK, BR/EDR ones
 * ACL_LINK.
 */
static void hci_sched_acl_blk(struct hci_dev *hdev)
{
	unsigned int cnt = hdev->block_cnt;
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	u8 type;

	__check_timeout(hdev, cnt);

	BT_DBG("%s", hdev->name);

	if (hdev->dev_type == HCI_AMP)
		type = AMP_LINK;
	else
		type = ACL_LINK;

	while (hdev->block_cnt > 0 &&
	       (chan = hci_chan_sent(hdev, type, &quote))) {
		/* Priority of the head skb when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
			int blocks;

			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			blocks = __get_blocks(hdev, skb);
			/* NOTE(review): if the packet needs more blocks than
			 * are free, we bail out with the skb already dequeued
			 * and neither sent, requeued nor freed - confirm this
			 * is intentional. */
			if (blocks > hdev->block_cnt)
				return;

			hci_conn_enter_active_mode(chan->conn,
						   bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* Charge the quote and pool in blocks, not packets */
			hdev->block_cnt -= blocks;
			quote -= blocks;

			chan->sent += blocks;
			chan->conn->sent += blocks;
		}
	}

	/* Something was sent: rebalance starved channels */
	if (cnt != hdev->block_cnt)
		hci_prio_recalculate(hdev, type);
}
3022
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003023static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003024{
3025 BT_DBG("%s", hdev->name);
3026
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003027 /* No ACL link over BR/EDR controller */
3028 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3029 return;
3030
3031 /* No AMP link over AMP controller */
3032 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003033 return;
3034
3035 switch (hdev->flow_ctl_mode) {
3036 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3037 hci_sched_acl_pkt(hdev);
3038 break;
3039
3040 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3041 hci_sched_acl_blk(hdev);
3042 break;
3043 }
3044}
3045
Linus Torvalds1da177e2005-04-16 15:20:36 -07003046/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003047static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003048{
3049 struct hci_conn *conn;
3050 struct sk_buff *skb;
3051 int quote;
3052
3053 BT_DBG("%s", hdev->name);
3054
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003055 if (!hci_conn_num(hdev, SCO_LINK))
3056 return;
3057
Linus Torvalds1da177e2005-04-16 15:20:36 -07003058 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3059 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3060 BT_DBG("skb %p len %d", skb, skb->len);
3061 hci_send_frame(skb);
3062
3063 conn->sent++;
3064 if (conn->sent == ~0)
3065 conn->sent = 0;
3066 }
3067 }
3068}
3069
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003070static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003071{
3072 struct hci_conn *conn;
3073 struct sk_buff *skb;
3074 int quote;
3075
3076 BT_DBG("%s", hdev->name);
3077
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003078 if (!hci_conn_num(hdev, ESCO_LINK))
3079 return;
3080
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003081 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3082 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003083 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3084 BT_DBG("skb %p len %d", skb, skb->len);
3085 hci_send_frame(skb);
3086
3087 conn->sent++;
3088 if (conn->sent == ~0)
3089 conn->sent = 0;
3090 }
3091 }
3092}
3093
/* LE scheduler: drain queued LE channels within the LE buffer budget
 * (falling back to the ACL pool when the controller has no dedicated
 * LE buffers), with the same priority handling as the ACL path.
 */
static void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
		    time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Shared ACL pool is used when there are no dedicated LE buffers */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head skb when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
			       skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* Something was sent: rebalance starved channels */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
3144
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003145static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003147 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148 struct sk_buff *skb;
3149
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003150 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003151 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152
3153 /* Schedule queues and send stuff to HCI driver */
3154
3155 hci_sched_acl(hdev);
3156
3157 hci_sched_sco(hdev);
3158
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003159 hci_sched_esco(hdev);
3160
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003161 hci_sched_le(hdev);
3162
Linus Torvalds1da177e2005-04-16 15:20:36 -07003163 /* Send next queued raw (unknown type) packet */
3164 while ((skb = skb_dequeue(&hdev->raw_q)))
3165 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003166}
3167
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003168/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003169
3170/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003171static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003172{
3173 struct hci_acl_hdr *hdr = (void *) skb->data;
3174 struct hci_conn *conn;
3175 __u16 handle, flags;
3176
3177 skb_pull(skb, HCI_ACL_HDR_SIZE);
3178
3179 handle = __le16_to_cpu(hdr->handle);
3180 flags = hci_flags(handle);
3181 handle = hci_handle(handle);
3182
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003183 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003184 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003185
3186 hdev->stat.acl_rx++;
3187
3188 hci_dev_lock(hdev);
3189 conn = hci_conn_hash_lookup_handle(hdev, handle);
3190 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003191
Linus Torvalds1da177e2005-04-16 15:20:36 -07003192 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003193 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003194
Linus Torvalds1da177e2005-04-16 15:20:36 -07003195 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003196 l2cap_recv_acldata(conn, skb, flags);
3197 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003198 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003199 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003200 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003201 }
3202
3203 kfree_skb(skb);
3204}
3205
3206/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003207static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208{
3209 struct hci_sco_hdr *hdr = (void *) skb->data;
3210 struct hci_conn *conn;
3211 __u16 handle;
3212
3213 skb_pull(skb, HCI_SCO_HDR_SIZE);
3214
3215 handle = __le16_to_cpu(hdr->handle);
3216
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003217 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003218
3219 hdev->stat.sco_rx++;
3220
3221 hci_dev_lock(hdev);
3222 conn = hci_conn_hash_lookup_handle(hdev, handle);
3223 hci_dev_unlock(hdev);
3224
3225 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003226 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003227 sco_recv_scodata(conn, skb);
3228 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003229 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003230 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003231 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003232 }
3233
3234 kfree_skb(skb);
3235}
3236
Johan Hedberg9238f362013-03-05 20:37:48 +02003237static bool hci_req_is_complete(struct hci_dev *hdev)
3238{
3239 struct sk_buff *skb;
3240
3241 skb = skb_peek(&hdev->cmd_q);
3242 if (!skb)
3243 return true;
3244
3245 return bt_cb(skb)->req.start;
3246}
3247
Johan Hedberg42c6b122013-03-05 20:37:49 +02003248static void hci_resend_last(struct hci_dev *hdev)
3249{
3250 struct hci_command_hdr *sent;
3251 struct sk_buff *skb;
3252 u16 opcode;
3253
3254 if (!hdev->sent_cmd)
3255 return;
3256
3257 sent = (void *) hdev->sent_cmd->data;
3258 opcode = __le16_to_cpu(sent->opcode);
3259 if (opcode == HCI_OP_RESET)
3260 return;
3261
3262 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3263 if (!skb)
3264 return;
3265
3266 skb_queue_head(&hdev->cmd_q, skb);
3267 queue_work(hdev->workqueue, &hdev->cmd_work);
3268}
3269
/* Handle completion of an HCI command that may belong to a queued
 * request (a chain of commands sharing one completion callback).
 *
 * @opcode: opcode of the command that just completed
 * @status: HCI status code of the command (0 = success)
 *
 * If the request has finished — its last command completed, or any
 * command failed — the request's complete callback is invoked and any
 * remaining queued commands of the same request are discarded.
 */
void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
{
	hci_req_complete_t req_complete = NULL;
	struct sk_buff *skb;
	unsigned long flags;

	BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);

	/* If the completed command doesn't match the last one that was
	 * sent we need to do special handling of it.
	 */
	if (!hci_sent_cmd_data(hdev, opcode)) {
		/* Some CSR based controllers generate a spontaneous
		 * reset complete event during init and any pending
		 * command will never be completed. In such a case we
		 * need to resend whatever was the last sent
		 * command.
		 */
		if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
			hci_resend_last(hdev);

		return;
	}

	/* If the command succeeded and there's still more commands in
	 * this request the request is not yet complete.
	 */
	if (!status && !hci_req_is_complete(hdev))
		return;

	/* If this was the last command in a request the complete
	 * callback would be found in hdev->sent_cmd instead of the
	 * command queue (hdev->cmd_q).
	 */
	if (hdev->sent_cmd) {
		req_complete = bt_cb(hdev->sent_cmd)->req.complete;
		if (req_complete)
			goto call_complete;
	}

	/* Remove all pending commands belonging to this request */
	spin_lock_irqsave(&hdev->cmd_q.lock, flags);
	while ((skb = __skb_dequeue(&hdev->cmd_q))) {
		if (bt_cb(skb)->req.start) {
			/* Head of the NEXT request: put it back and stop */
			__skb_queue_head(&hdev->cmd_q, skb);
			break;
		}

		/* Last callback seen wins; intermediate skbs are freed */
		req_complete = bt_cb(skb)->req.complete;
		kfree_skb(skb);
	}
	spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);

call_complete:
	if (req_complete)
		req_complete(hdev, status);
}
3327
/* RX work handler: drain hdev->rx_q and dispatch each received frame.
 *
 * Every frame is first mirrored to the HCI monitor and, when the
 * device is in promiscuous mode, to raw HCI sockets.  In raw mode the
 * stack does not process frames at all; during init, data packets are
 * dropped while events are still processed.  Remaining frames are
 * dispatched by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		/* Send copy to monitor */
		hci_send_to_monitor(hdev, skb);

		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb);
		}

		/* Raw mode: userspace owns the device, drop the frame here */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state;
			 * events still fall through to the dispatch below. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop */
			kfree_skb(skb);
			break;
		}
	}
}
3382
/* TX command work handler: send the next queued HCI command.
 *
 * cmd_cnt acts as the controller's command-buffer credit; a command
 * is only sent while a credit is available.  A clone of the sent skb
 * is kept in hdev->sent_cmd so its completion event can be matched
 * later, and a command timeout timer is armed (except for HCI_Reset).
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
	       atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously kept command copy (may be NULL) */
		kfree_skb(hdev->sent_cmd);

		/* Keep a copy so the reply can be matched to the command */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No timeout for HCI_Reset while resetting */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					  jiffies + HCI_CMD_TIMEOUT);
		} else {
			/* Clone failed: requeue the command and retry later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003414
3415int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3416{
3417 /* General inquiry access code (GIAC) */
3418 u8 lap[3] = { 0x33, 0x8b, 0x9e };
3419 struct hci_cp_inquiry cp;
3420
3421 BT_DBG("%s", hdev->name);
3422
3423 if (test_bit(HCI_INQUIRY, &hdev->flags))
3424 return -EINPROGRESS;
3425
Johan Hedberg46632622012-01-02 16:06:08 +02003426 inquiry_cache_flush(hdev);
3427
Andre Guedes2519a1f2011-11-07 11:45:24 -03003428 memset(&cp, 0, sizeof(cp));
3429 memcpy(&cp.lap, lap, sizeof(cp.lap));
3430 cp.length = length;
3431
3432 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3433}
Andre Guedes023d50492011-11-04 14:16:52 -03003434
3435int hci_cancel_inquiry(struct hci_dev *hdev)
3436{
3437 BT_DBG("%s", hdev->name);
3438
3439 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03003440 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03003441
3442 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3443}
Andre Guedes31f79562012-04-24 21:02:53 -03003444
3445u8 bdaddr_to_le(u8 bdaddr_type)
3446{
3447 switch (bdaddr_type) {
3448 case BDADDR_LE_PUBLIC:
3449 return ADDR_LE_DEV_PUBLIC;
3450
3451 default:
3452 /* Fallback to LE Random address type */
3453 return ADDR_LE_DEV_RANDOM;
3454 }
3455}