/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/export.h>
#include <linux/idr.h>

#include <linux/rfkill.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI ID Numbering */
static DEFINE_IDA(hci_index_ida);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
	hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

static void hci_req_sync_complete(struct hci_dev *hdev, u8 result)
{
	BT_DBG("%s result 0x%2.2x", hdev->name, result);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_req_sync(struct hci_dev *hdev,
			  void (*func)(struct hci_request *req,
				       unsigned long opt),
			  unsigned long opt, __u32 timeout)
{
	struct hci_request req;
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hci_req_init(&req, hdev);

	hdev->req_status = HCI_REQ_PEND;

	func(&req, opt);

	err = hci_req_run(&req, hci_req_sync_complete);
	if (err < 0) {
		hdev->req_status = 0;

		/* ENODATA means the HCI request command queue is empty.
		 * This can happen when a request with conditionals doesn't
		 * trigger any commands to be sent. This is normal behavior
		 * and should not trigger an error return.
		 */
		if (err == -ENODATA)
			return 0;

		return err;
	}

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
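
/* Note on the handshake above: __hci_req_sync() marks the request
 * HCI_REQ_PEND before running it and then sleeps on req_wait_q.
 * hci_req_sync_complete() (or hci_req_cancel()) stores the result,
 * flips req_status to HCI_REQ_DONE or HCI_REQ_CANCELED and wakes the
 * waiter, which maps the status back to an errno.
 */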

static int hci_req_sync(struct hci_dev *hdev,
			void (*req)(struct hci_request *req,
				    unsigned long opt),
			unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_req_sync(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
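
/* Usage sketch (illustrative only): a request builder queues one or
 * more commands via hci_req_add() and hci_req_sync() blocks until the
 * controller completes them or the timeout fires, e.g.
 *
 *	err = hci_req_sync(hdev, hci_scan_req, SCAN_PAGE,
 *			   HCI_INIT_TIMEOUT);
 *
 * hci_scan_req() below is one such builder; hci_dev_cmd() drives the
 * same pattern for the HCISET* ioctls.
 */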

static void hci_reset_req(struct hci_request *req, unsigned long opt)
{
	BT_DBG("%s %ld", req->hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &req->hdev->flags);
	hci_req_add(req, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

	/* Read Local Supported Features */
	hci_req_add(req, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read BD Address */
	hci_req_add(req, HCI_OP_READ_BD_ADDR, 0, NULL);
}

static void amp_init(struct hci_request *req)
{
	req->hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

	/* Read Local Version */
	hci_req_add(req, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Local AMP Info */
	hci_req_add(req, HCI_OP_READ_LOCAL_AMP_INFO, 0, NULL);

	/* Read Data Blk size */
	hci_req_add(req, HCI_OP_READ_DATA_BLOCK_SIZE, 0, NULL);
}

static void hci_init1_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_request init_req;
	struct sk_buff *skb;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	hci_req_init(&init_req, hdev);

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		if (skb_queue_empty(&init_req.cmd_q))
			bt_cb(skb)->req.start = true;

		skb_queue_tail(&init_req.cmd_q, skb);
	}
	skb_queue_purge(&hdev->driver_init);

	hci_req_run(&init_req, NULL);

	/* Reset */
	if (!test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks))
		hci_reset_req(req, 0);

	switch (hdev->dev_type) {
	case HCI_BREDR:
		bredr_init(req);
		break;

	case HCI_AMP:
		amp_init(req);
		break;

	default:
		BT_ERR("Unknown device type %d", hdev->dev_type);
		break;
	}
}

static void bredr_setup(struct hci_request *req)
{
	struct hci_cp_delete_stored_link_key cp;
	__le16 param;
	__u8 flt_type;

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_req_add(req, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read Class of Device */
	hci_req_add(req, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_req_add(req, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_req_add(req, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_req_add(req, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = __constant_cpu_to_le16(0x7d00);
	hci_req_add(req, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 0x01;
	hci_req_add(req, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void le_setup(struct hci_request *req)
{
	/* Read LE Buffer Size */
	hci_req_add(req, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);

	/* Read LE Local Supported Features */
	hci_req_add(req, HCI_OP_LE_READ_LOCAL_FEATURES, 0, NULL);

	/* Read LE Advertising Channel TX Power */
	hci_req_add(req, HCI_OP_LE_READ_ADV_TX_POWER, 0, NULL);

	/* Read LE White List Size */
	hci_req_add(req, HCI_OP_LE_READ_WHITE_LIST_SIZE, 0, NULL);

	/* Read LE Supported States */
	hci_req_add(req, HCI_OP_LE_READ_SUPPORTED_STATES, 0, NULL);
}

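/* Pick the inquiry result format for HCI_OP_WRITE_INQUIRY_MODE:
 * 0x02 selects Inquiry Result with Extended Inquiry Response, 0x01
 * Inquiry Result with RSSI, and 0x00 the standard format. A few
 * controllers (matched by Bluetooth SIG manufacturer ID plus HCI
 * revision and LMP subversion) handle RSSI results even though their
 * feature bits don't advertise it, so they are special-cased.
 */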
static u8 hci_get_inquiry_mode(struct hci_dev *hdev)
{
	if (lmp_ext_inq_capable(hdev))
		return 0x02;

	if (lmp_inq_rssi_capable(hdev))
		return 0x01;

	if (hdev->manufacturer == 11 && hdev->hci_rev == 0x00 &&
	    hdev->lmp_subver == 0x0757)
		return 0x01;

	if (hdev->manufacturer == 15) {
		if (hdev->hci_rev == 0x03 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x09 && hdev->lmp_subver == 0x6963)
			return 0x01;
		if (hdev->hci_rev == 0x00 && hdev->lmp_subver == 0x6965)
			return 0x01;
	}

	if (hdev->manufacturer == 31 && hdev->hci_rev == 0x2005 &&
	    hdev->lmp_subver == 0x1805)
		return 0x01;

	return 0x00;
}

static void hci_setup_inquiry_mode(struct hci_request *req)
{
	u8 mode;

	mode = hci_get_inquiry_mode(req->hdev);

	hci_req_add(req, HCI_OP_WRITE_INQUIRY_MODE, 1, &mode);
}

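/* The event mask is eight bytes wide, one bit per HCI event. Beyond
 * the defaults, a bit is only set when the corresponding LMP feature
 * indicates the controller can actually generate that event.
 */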
static void hci_setup_event_mask(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;

	/* The second byte is 0xff instead of 0x9f (two reserved bits
	 * disabled) since a Broadcom 1.2 dongle doesn't respond to the
	 * command otherwise.
	 */
	u8 events[8] = { 0xff, 0xff, 0xfb, 0xff, 0x00, 0x00, 0x00, 0x00 };

	/* CSR 1.1 dongles do not accept any bitfield so don't try to set
	 * any event mask for pre 1.2 devices.
	 */
	if (hdev->hci_ver < BLUETOOTH_VER_1_2)
		return;

	if (lmp_bredr_capable(hdev)) {
		events[4] |= 0x01; /* Flow Specification Complete */
		events[4] |= 0x02; /* Inquiry Result with RSSI */
		events[4] |= 0x04; /* Read Remote Extended Features Complete */
		events[5] |= 0x08; /* Synchronous Connection Complete */
		events[5] |= 0x10; /* Synchronous Connection Changed */
	}

	if (lmp_inq_rssi_capable(hdev))
		events[4] |= 0x02; /* Inquiry Result with RSSI */

	if (lmp_sniffsubr_capable(hdev))
		events[5] |= 0x20; /* Sniff Subrating */

	if (lmp_pause_enc_capable(hdev))
		events[5] |= 0x80; /* Encryption Key Refresh Complete */

	if (lmp_ext_inq_capable(hdev))
		events[5] |= 0x40; /* Extended Inquiry Result */

	if (lmp_no_flush_capable(hdev))
		events[7] |= 0x01; /* Enhanced Flush Complete */

	if (lmp_lsto_capable(hdev))
		events[6] |= 0x80; /* Link Supervision Timeout Changed */

	if (lmp_ssp_capable(hdev)) {
		events[6] |= 0x01; /* IO Capability Request */
		events[6] |= 0x02; /* IO Capability Response */
		events[6] |= 0x04; /* User Confirmation Request */
		events[6] |= 0x08; /* User Passkey Request */
		events[6] |= 0x10; /* Remote OOB Data Request */
		events[6] |= 0x20; /* Simple Pairing Complete */
		events[7] |= 0x04; /* User Passkey Notification */
		events[7] |= 0x08; /* Keypress Notification */
		events[7] |= 0x10; /* Remote Host Supported
				    * Features Notification
				    */
	}

	if (lmp_le_capable(hdev))
		events[7] |= 0x20; /* LE Meta-Event */

	hci_req_add(req, HCI_OP_SET_EVENT_MASK, sizeof(events), events);

	if (lmp_le_capable(hdev)) {
		memset(events, 0, sizeof(events));
		events[0] = 0x1f;
		hci_req_add(req, HCI_OP_LE_SET_EVENT_MASK,
			    sizeof(events), events);
	}
}

static void hci_init2_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (lmp_bredr_capable(hdev))
		bredr_setup(req);

	if (lmp_le_capable(hdev))
		le_setup(req);

	hci_setup_event_mask(req);

	if (hdev->hci_ver > BLUETOOTH_VER_1_1)
		hci_req_add(req, HCI_OP_READ_LOCAL_COMMANDS, 0, NULL);

	if (lmp_ssp_capable(hdev)) {
		if (test_bit(HCI_SSP_ENABLED, &hdev->dev_flags)) {
			u8 mode = 0x01;
			hci_req_add(req, HCI_OP_WRITE_SSP_MODE,
				    sizeof(mode), &mode);
		} else {
			struct hci_cp_write_eir cp;

			memset(hdev->eir, 0, sizeof(hdev->eir));
			memset(&cp, 0, sizeof(cp));

			hci_req_add(req, HCI_OP_WRITE_EIR, sizeof(cp), &cp);
		}
	}

	if (lmp_inq_rssi_capable(hdev))
		hci_setup_inquiry_mode(req);

	if (lmp_inq_tx_pwr_capable(hdev))
		hci_req_add(req, HCI_OP_READ_INQ_RSP_TX_POWER, 0, NULL);

	if (lmp_ext_feat_capable(hdev)) {
		struct hci_cp_read_local_ext_features cp;

		cp.page = 0x01;
		hci_req_add(req, HCI_OP_READ_LOCAL_EXT_FEATURES,
			    sizeof(cp), &cp);
	}

	if (test_bit(HCI_LINK_SECURITY, &hdev->dev_flags)) {
		u8 enable = 1;
		hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, sizeof(enable),
			    &enable);
	}
}

static void hci_setup_link_policy(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_def_link_policy cp;
	u16 link_policy = 0;

	if (lmp_rswitch_capable(hdev))
		link_policy |= HCI_LP_RSWITCH;
	if (lmp_hold_capable(hdev))
		link_policy |= HCI_LP_HOLD;
	if (lmp_sniff_capable(hdev))
		link_policy |= HCI_LP_SNIFF;
	if (lmp_park_capable(hdev))
		link_policy |= HCI_LP_PARK;

	cp.policy = cpu_to_le16(link_policy);
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, sizeof(cp), &cp);
}

static void hci_set_le_support(struct hci_request *req)
{
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_write_le_host_supported cp;

	memset(&cp, 0, sizeof(cp));

	if (test_bit(HCI_LE_ENABLED, &hdev->dev_flags)) {
		cp.le = 0x01;
		cp.simul = lmp_le_br_capable(hdev);
	}

	if (cp.le != lmp_host_le_capable(hdev))
		hci_req_add(req, HCI_OP_WRITE_LE_HOST_SUPPORTED, sizeof(cp),
			    &cp);
}

static void hci_init3_req(struct hci_request *req, unsigned long opt)
{
	struct hci_dev *hdev = req->hdev;

	if (hdev->commands[5] & 0x10)
		hci_setup_link_policy(req);

	if (lmp_le_capable(hdev))
		hci_set_le_support(req);
}

static int __hci_init(struct hci_dev *hdev)
{
	int err;

	err = __hci_req_sync(hdev, hci_init1_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	/* HCI_BREDR covers single-mode LE, BR/EDR and dual-mode
	 * BR/EDR/LE type controllers. AMP controllers only need the
	 * first stage init.
	 */
	if (hdev->dev_type != HCI_BREDR)
		return 0;

	err = __hci_req_sync(hdev, hci_init2_req, 0, HCI_INIT_TIMEOUT);
	if (err < 0)
		return err;

	return __hci_req_sync(hdev, hci_init3_req, 0, HCI_INIT_TIMEOUT);
}

static void hci_scan_req(struct hci_request *req, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", req->hdev->name, scan);

	/* Inquiry and Page scans */
	hci_req_add(req, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_request *req, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", req->hdev->name, auth);

	/* Authentication */
	hci_req_add(req, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_request *req, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", req->hdev->name, encrypt);

	/* Encryption */
	hci_req_add(req, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_request *req, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", req->hdev->name, policy);

	/* Default link policy */
	hci_req_add(req, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
	struct discovery_state *discov = &hdev->discovery;

	switch (discov->state) {
	case DISCOVERY_FINDING:
	case DISCOVERY_RESOLVING:
		return true;

	default:
		return false;
	}
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
	BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

	if (hdev->discovery.state == state)
		return;

	switch (state) {
	case DISCOVERY_STOPPED:
		if (hdev->discovery.state != DISCOVERY_STARTING)
			mgmt_discovering(hdev, 0);
		break;
	case DISCOVERY_STARTING:
		break;
	case DISCOVERY_FINDING:
		mgmt_discovering(hdev, 1);
		break;
	case DISCOVERY_RESOLVING:
		break;
	case DISCOVERY_STOPPING:
		break;
	}

	hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *p, *n;

	list_for_each_entry_safe(p, n, &cache->all, all) {
		list_del(&p->all);
		kfree(p);
	}

	INIT_LIST_HEAD(&cache->unknown);
	INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev,
					       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->all, all) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
						       bdaddr_t *bdaddr)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %pMR", cache, bdaddr);

	list_for_each_entry(e, &cache->unknown, list) {
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
						       bdaddr_t *bdaddr,
						       int state)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *e;

	BT_DBG("cache %p bdaddr %pMR state %d", cache, bdaddr, state);

	list_for_each_entry(e, &cache->resolve, list) {
		if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
			return e;
		if (!bacmp(&e->data.bdaddr, bdaddr))
			return e;
	}

	return NULL;
}

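/* Re-queue an entry on the resolve list, which is kept roughly sorted
 * by signal strength (RSSI closest to zero first) so that remote
 * names are resolved for the strongest devices first; entries already
 * being resolved (NAME_PENDING) are never displaced.
 */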
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
				      struct inquiry_entry *ie)
{
	struct discovery_state *cache = &hdev->discovery;
	struct list_head *pos = &cache->resolve;
	struct inquiry_entry *p;

	list_del(&ie->list);

	list_for_each_entry(p, &cache->resolve, list) {
		if (p->name_state != NAME_PENDING &&
		    abs(p->data.rssi) >= abs(ie->data.rssi))
			break;
		pos = &p->list;
	}

	list_add(&ie->list, pos);
}

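/* Add or refresh the cache entry for an inquiry result. Returns true
 * when the entry needs no further name resolution (its remote name is
 * known or pending) and false when a remote name request is still
 * required. If ssp is non-NULL it is set when either the new result
 * or the cached entry indicates Secure Simple Pairing support.
 */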
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
			      bool name_known, bool *ssp)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %pMR", cache, &data->bdaddr);

	hci_remove_remote_oob_data(hdev, &data->bdaddr);

	if (ssp)
		*ssp = data->ssp_mode;

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (ie) {
		if (ie->data.ssp_mode && ssp)
			*ssp = true;

		if (ie->name_state == NAME_NEEDED &&
		    data->rssi != ie->data.rssi) {
			ie->data.rssi = data->rssi;
			hci_inquiry_cache_update_resolve(hdev, ie);
		}

		goto update;
	}

	/* Entry not in the cache. Add new one. */
	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
	if (!ie)
		return false;

	list_add(&ie->all, &cache->all);

	if (name_known) {
		ie->name_state = NAME_KNOWN;
	} else {
		ie->name_state = NAME_NOT_KNOWN;
		list_add(&ie->list, &cache->unknown);
	}

update:
	if (name_known && ie->name_state != NAME_KNOWN &&
	    ie->name_state != NAME_PENDING) {
		ie->name_state = NAME_KNOWN;
		list_del(&ie->list);
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;

	if (ie->name_state == NAME_NOT_KNOWN)
		return false;

	return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct discovery_state *cache = &hdev->discovery;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	list_for_each_entry(e, &cache->all, all) {
		struct inquiry_data *data = &e->data;

		if (copied >= num)
			break;

		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset = data->clock_offset;

		info++;
		copied++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_request *req, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_dev *hdev = req->hdev;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_req_add(req, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

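/* HCIINQUIRY ioctl: if the inquiry cache is stale (or a flush was
 * requested) a fresh inquiry is run synchronously first; afterwards
 * the cache is dumped into a temporary buffer and copied back to the
 * caller, capped at ir.num_rsp responses (255 when unlimited).
 */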
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
	    inquiry_cache_empty(hdev) || ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_req_sync(hdev, hci_inq_req, (unsigned long) &ir,
				   timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries
	 */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
				 ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}

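/* Build the LE advertising payload. The data is a sequence of AD
 * structures, each a length octet (covering type plus value), a type
 * octet (EIR_* constant) and the value itself: flags first, then TX
 * power if valid, then the local name, shortened if it doesn't fit
 * in the space left of the HCI_MAX_AD_LENGTH-byte buffer.
 */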
static u8 create_ad(struct hci_dev *hdev, u8 *ptr)
{
	u8 ad_len = 0, flags = 0;
	size_t name_len;

	if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
		flags |= LE_AD_GENERAL;

	if (!lmp_bredr_capable(hdev))
		flags |= LE_AD_NO_BREDR;

	if (lmp_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_CTRL;

	if (lmp_host_le_br_capable(hdev))
		flags |= LE_AD_SIM_LE_BREDR_HOST;

	if (flags) {
		BT_DBG("adv flags 0x%02x", flags);

		ptr[0] = 2;
		ptr[1] = EIR_FLAGS;
		ptr[2] = flags;

		ad_len += 3;
		ptr += 3;
	}

	if (hdev->adv_tx_power != HCI_TX_POWER_INVALID) {
		ptr[0] = 2;
		ptr[1] = EIR_TX_POWER;
		ptr[2] = (u8) hdev->adv_tx_power;

		ad_len += 3;
		ptr += 3;
	}

	name_len = strlen(hdev->dev_name);
	if (name_len > 0) {
		size_t max_len = HCI_MAX_AD_LENGTH - ad_len - 2;

		if (name_len > max_len) {
			name_len = max_len;
			ptr[1] = EIR_NAME_SHORT;
		} else
			ptr[1] = EIR_NAME_COMPLETE;

		ptr[0] = name_len + 1;

		memcpy(ptr + 2, hdev->dev_name, name_len);

		ad_len += (name_len + 2);
		ptr += (name_len + 2);
	}

	return ad_len;
}

int hci_update_ad(struct hci_dev *hdev)
{
	struct hci_cp_le_set_adv_data cp;
	u8 len;
	int err;

	hci_dev_lock(hdev);

	if (!lmp_le_capable(hdev)) {
		err = -EINVAL;
		goto unlock;
	}

	memset(&cp, 0, sizeof(cp));

	len = create_ad(hdev, cp.data);

	if (hdev->adv_data_len == len &&
	    memcmp(cp.data, hdev->adv_data, len) == 0) {
		err = 0;
		goto unlock;
	}

	memcpy(hdev->adv_data, cp.data, sizeof(cp.data));
	hdev->adv_data_len = len;

	cp.length = len;
	err = hci_send_cmd(hdev, HCI_OP_LE_SET_ADV_DATA, sizeof(cp), &cp);

unlock:
	hci_dev_unlock(hdev);

	return err;
}

/* ---- HCI ioctl helpers ---- */

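/* Power on a controller: take a reference via the device index, run
 * the driver's open() callback and, unless the device is marked raw,
 * the staged __hci_init() sequence; on failure, every queue and work
 * item touched so far is flushed back to a clean state.
 */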
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
		ret = -ENODEV;
		goto done;
	}

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non-BR/EDR controllers as raw devices if
	 * enable_hs is not set
	 */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		ret = __hci_init(hdev);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		hci_update_ad(hdev);
		if (!test_bit(HCI_SETUP, &hdev->dev_flags) &&
		    mgmt_valid_hdev(hdev)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	cancel_work_sync(&hdev->le_scan);

	cancel_delayed_work(&hdev->power_off);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
		clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
	}

	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
		cancel_delayed_work(&hdev->service_cache);

	cancel_delayed_work_sync(&hdev->le_scan_disable);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags) &&
	    test_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_req_sync(hdev, hci_reset_req, 0, HCI_CMD_TIMEOUT);
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags) &&
	    mgmt_valid_hdev(hdev)) {
		hci_dev_lock(hdev);
		mgmt_powered(hdev, 0);
		hci_dev_unlock(hdev);
	}

	/* Clear flags */
	hdev->flags = 0;

	/* Controller radio is available but is currently powered down */
	hdev->amp_status = 0;

	memset(hdev->eir, 0, sizeof(hdev->eir));
	memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
		cancel_delayed_work(&hdev->power_off);

	err = hci_dev_do_close(hdev);

	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_req_sync(hdev, hci_reset_req, 0, HCI_INIT_TIMEOUT);

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_req_sync(hdev, hci_auth_req, dr.dev_opt,
					   HCI_INIT_TIMEOUT);
			if (err)
				break;
		}

		err = hci_req_sync(hdev, hci_encrypt_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETSCAN:
		err = hci_req_sync(hdev, hci_scan_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKPOL:
		err = hci_req_sync(hdev, hci_linkpol_req, dr.dev_opt,
				   HCI_INIT_TIMEOUT);
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
				  (HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

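	/* A sketch of the layout assumed below: dr.dev_opt packs two
	 * 16-bit values into one 32-bit word for the MTU ioctls. With
	 * this pointer arithmetic, word 0 is the packet count and
	 * word 1 the MTU on little-endian hosts; the split is
	 * host-endianness dependent.
	 */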
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283 case HCISETACLMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001284 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
1285 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001286 break;
1287
1288 case HCISETSCOMTU:
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001289 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
1290 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291 break;
1292
1293 default:
1294 err = -EINVAL;
1295 break;
1296 }
Marcel Holtmanne4e8e372008-07-14 20:13:47 +02001297
Linus Torvalds1da177e2005-04-16 15:20:36 -07001298 hci_dev_put(hdev);
1299 return err;
1300}
1301
1302int hci_get_dev_list(void __user *arg)
1303{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001304 struct hci_dev *hdev;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305 struct hci_dev_list_req *dl;
1306 struct hci_dev_req *dr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001307 int n = 0, size, err;
1308 __u16 dev_num;
1309
1310 if (get_user(dev_num, (__u16 __user *) arg))
1311 return -EFAULT;
1312
1313 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
1314 return -EINVAL;
1315
1316 size = sizeof(*dl) + dev_num * sizeof(*dr);
1317
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001318 dl = kzalloc(size, GFP_KERNEL);
1319 if (!dl)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 return -ENOMEM;
1321
1322 dr = dl->dev_req;
1323
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001324 read_lock(&hci_dev_list_lock);
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001325 list_for_each_entry(hdev, &hci_dev_list, list) {
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001326 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberge0f93092011-11-09 01:44:22 +02001327 cancel_delayed_work(&hdev->power_off);
Johan Hedbergc542a062011-01-26 13:11:03 +02001328
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001329 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1330 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001331
Linus Torvalds1da177e2005-04-16 15:20:36 -07001332 (dr + n)->dev_id = hdev->id;
1333 (dr + n)->dev_opt = hdev->flags;
Johan Hedbergc542a062011-01-26 13:11:03 +02001334
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 if (++n >= dev_num)
1336 break;
1337 }
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02001338 read_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001339
1340 dl->dev_num = n;
1341 size = sizeof(*dl) + n * sizeof(*dr);
1342
1343 err = copy_to_user(arg, dl, size);
1344 kfree(dl);
1345
1346 return err ? -EFAULT : 0;
1347}
1348
1349int hci_get_dev_info(void __user *arg)
1350{
1351 struct hci_dev *hdev;
1352 struct hci_dev_info di;
1353 int err = 0;
1354
1355 if (copy_from_user(&di, arg, sizeof(di)))
1356 return -EFAULT;
1357
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02001358 hdev = hci_dev_get(di.dev_id);
1359 if (!hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 return -ENODEV;
1361
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001362 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg32435532011-11-07 22:16:04 +02001363 cancel_delayed_work_sync(&hdev->power_off);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001364
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001365 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1366 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
Johan Hedbergc542a062011-01-26 13:11:03 +02001367
Linus Torvalds1da177e2005-04-16 15:20:36 -07001368 strcpy(di.name, hdev->name);
1369 di.bdaddr = hdev->bdaddr;
Marcel Holtmann943da252010-02-13 02:28:41 +01001370 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 di.flags = hdev->flags;
1372 di.pkt_type = hdev->pkt_type;
Johan Hedberg572c7f82012-10-19 20:57:46 +03001373 if (lmp_bredr_capable(hdev)) {
1374 di.acl_mtu = hdev->acl_mtu;
1375 di.acl_pkts = hdev->acl_pkts;
1376 di.sco_mtu = hdev->sco_mtu;
1377 di.sco_pkts = hdev->sco_pkts;
1378 } else {
1379 di.acl_mtu = hdev->le_mtu;
1380 di.acl_pkts = hdev->le_pkts;
1381 di.sco_mtu = 0;
1382 di.sco_pkts = 0;
1383 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001384 di.link_policy = hdev->link_policy;
1385 di.link_mode = hdev->link_mode;
1386
1387 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1388 memcpy(&di.features, &hdev->features, sizeof(di.features));
1389
1390 if (copy_to_user(arg, &di, sizeof(di)))
1391 err = -EFAULT;
1392
1393 hci_dev_put(hdev);
1394
1395 return err;
1396}
1397
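/* Example sketch (illustrative only): the matching userspace call for
 * hci_get_dev_info(). Only di.dev_id is an input; the kernel fills in
 * the rest. sk is assumed to be an open raw HCI socket, and ba2str()
 * is assumed from the BlueZ userspace library.
 *
 *	char addr[18];
 *	struct hci_dev_info di = { .dev_id = 0 };
 *
 *	if (ioctl(sk, HCIGETDEVINFO, (void *) &di) == 0) {
 *		ba2str(&di.bdaddr, addr);
 *		printf("%s %s\n", di.name, addr);
 *	}
 */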
1398/* ---- Interface to HCI drivers ---- */
1399
Marcel Holtmann611b30f2009-06-08 14:41:38 +02001400static int hci_rfkill_set_block(void *data, bool blocked)
1401{
1402 struct hci_dev *hdev = data;
1403
1404 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1405
1406 if (!blocked)
1407 return 0;
1408
1409 hci_dev_do_close(hdev);
1410
1411 return 0;
1412}
1413
1414static const struct rfkill_ops hci_rfkill_ops = {
1415 .set_block = hci_rfkill_set_block,
1416};
1417
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001418static void hci_power_on(struct work_struct *work)
1419{
1420 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1421
1422 BT_DBG("%s", hdev->name);
1423
1424 if (hci_dev_open(hdev->id) < 0)
1425 return;
1426
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001427 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
Johan Hedberg19202572013-01-14 22:33:51 +02001428 queue_delayed_work(hdev->req_workqueue, &hdev->power_off,
1429 HCI_AUTO_OFF_TIMEOUT);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001430
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02001431 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
Johan Hedberg744cf192011-11-08 20:40:14 +02001432 mgmt_index_added(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001433}
1434
1435static void hci_power_off(struct work_struct *work)
1436{
Johan Hedberg32435532011-11-07 22:16:04 +02001437 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001438 power_off.work);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001439
1440 BT_DBG("%s", hdev->name);
1441
Marcel Holtmann8ee56542012-02-21 12:33:48 +01001442 hci_dev_do_close(hdev);
Johan Hedbergab81cbf2010-12-15 13:53:18 +02001443}
1444
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001445static void hci_discov_off(struct work_struct *work)
1446{
1447 struct hci_dev *hdev;
1448 u8 scan = SCAN_PAGE;
1449
1450 hdev = container_of(work, struct hci_dev, discov_off.work);
1451
1452 BT_DBG("%s", hdev->name);
1453
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001454 hci_dev_lock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001455
1456 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1457
1458 hdev->discov_timeout = 0;
1459
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03001460 hci_dev_unlock(hdev);
Johan Hedberg16ab91a2011-11-07 22:16:02 +02001461}
1462
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001463int hci_uuids_clear(struct hci_dev *hdev)
1464{
Johan Hedberg48210022013-01-27 00:31:28 +02001465 struct bt_uuid *uuid, *tmp;
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001466
Johan Hedberg48210022013-01-27 00:31:28 +02001467 list_for_each_entry_safe(uuid, tmp, &hdev->uuids, list) {
1468 list_del(&uuid->list);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02001469 kfree(uuid);
1470 }
1471
1472 return 0;
1473}
1474
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001475int hci_link_keys_clear(struct hci_dev *hdev)
1476{
1477 struct list_head *p, *n;
1478
1479 list_for_each_safe(p, n, &hdev->link_keys) {
1480 struct link_key *key;
1481
1482 key = list_entry(p, struct link_key, list);
1483
1484 list_del(p);
1485 kfree(key);
1486 }
1487
1488 return 0;
1489}
1490
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001491int hci_smp_ltks_clear(struct hci_dev *hdev)
1492{
1493 struct smp_ltk *k, *tmp;
1494
1495 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1496 list_del(&k->list);
1497 kfree(k);
1498 }
1499
1500 return 0;
1501}
1502
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001503struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1504{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001505 struct link_key *k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001506
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001507 list_for_each_entry(k, &hdev->link_keys, list)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001508 if (bacmp(bdaddr, &k->bdaddr) == 0)
1509 return k;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001510
1511 return NULL;
1512}
1513
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301514static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001515 u8 key_type, u8 old_key_type)
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001516{
1517 /* Legacy key */
1518 if (key_type < 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301519 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001520
1521 /* Debug keys are insecure so don't store them persistently */
1522 if (key_type == HCI_LK_DEBUG_COMBINATION)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301523 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001524
1525 /* Changed combination key and there's no previous one */
1526 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301527 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001528
1529 /* Security mode 3 case */
1530 if (!conn)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301531 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001532
1533 /* Neither local nor remote side had no-bonding as requirement */
1534 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301535 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001536
1537 /* Local side had dedicated bonding as requirement */
1538 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301539 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001540
1541 /* Remote side had dedicated bonding as requirement */
1542 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301543 return true;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001544
1545 /* If none of the above criteria match, then don't store the key
1546 * persistently */
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301547 return false;
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001548}
1549
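/* Worked example for the policy above (illustrative only): an
 * unauthenticated combination key (type 0x04) negotiated while both
 * sides asked for general bonding (auth_type 0x04) passes the
 * "conn->auth_type > 0x01 && conn->remote_auth > 0x01" test and is
 * stored; the same key from a pairing where both sides used
 * no-bonding (0x00 or 0x01) falls through every check and is
 * discarded once the connection goes away.
 */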
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001550struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001551{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001552 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001553
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001554 list_for_each_entry(k, &hdev->long_term_keys, list) {
1555 if (k->ediv != ediv ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001556 memcmp(rand, k->rand, sizeof(k->rand)))
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001557 continue;
1558
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001559 return k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001560 }
1561
1562 return NULL;
1563}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001564
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001565struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001566 u8 addr_type)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001567{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001568 struct smp_ltk *k;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001569
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001570 list_for_each_entry(k, &hdev->long_term_keys, list)
1571 if (addr_type == k->bdaddr_type &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001572 bacmp(bdaddr, &k->bdaddr) == 0)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001573 return k;
1574
1575 return NULL;
1576}
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001577
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001578int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001579 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001580{
1581 struct link_key *key, *old_key;
Vishal Agarwal745c0ce2012-04-13 17:43:22 +05301582 u8 old_key_type;
1583 bool persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001584
1585 old_key = hci_find_link_key(hdev, bdaddr);
1586 if (old_key) {
1587 old_key_type = old_key->type;
1588 key = old_key;
1589 } else {
Johan Hedberg12adcf32011-04-28 11:29:00 -07001590 old_key_type = conn ? conn->key_type : 0xff;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001591 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1592 if (!key)
1593 return -ENOMEM;
1594 list_add(&key->list, &hdev->link_keys);
1595 }
1596
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001597 BT_DBG("%s key for %pMR type %u", hdev->name, bdaddr, type);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001598
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001599 /* Some buggy controller combinations generate a changed
1600 * combination key for legacy pairing even when there's no
1601 * previous key */
1602 if (type == HCI_LK_CHANGED_COMBINATION &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03001603 (!conn || conn->remote_auth == 0xff) && old_key_type == 0xff) {
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001604 type = HCI_LK_COMBINATION;
Johan Hedberg655fe6e2011-04-28 11:29:01 -07001605 if (conn)
1606 conn->key_type = type;
1607 }
Johan Hedbergd25e28a2011-04-28 11:28:59 -07001608
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001609 bacpy(&key->bdaddr, bdaddr);
Andrei Emeltchenko9b3b4462012-05-23 11:31:20 +03001610 memcpy(key->val, val, HCI_LINK_KEY_SIZE);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001611 key->pin_len = pin_len;
1612
Waldemar Rymarkiewiczb6020ba2011-04-28 12:07:53 +02001613 if (type == HCI_LK_CHANGED_COMBINATION)
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001614 key->type = old_key_type;
Johan Hedberg4748fed2011-04-28 11:29:02 -07001615 else
1616 key->type = type;
1617
Johan Hedberg4df378a2011-04-28 11:29:03 -07001618 if (!new_key)
1619 return 0;
1620
1621 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1622
Johan Hedberg744cf192011-11-08 20:40:14 +02001623 mgmt_new_link_key(hdev, key, persistent);
Johan Hedberg4df378a2011-04-28 11:29:03 -07001624
Vishal Agarwal6ec5bca2012-04-16 14:44:44 +05301625 if (conn)
1626 conn->flush_key = !persistent;
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001627
1628 return 0;
1629}
1630
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001631int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
Andrei Emeltchenko9a006652012-03-09 12:12:12 +02001632 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, __le16
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001633 ediv, u8 rand[8])
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001634{
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001635 struct smp_ltk *key, *old_key;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001636
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001637 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1638 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001639
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001640 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1641 if (old_key)
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001642 key = old_key;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001643 else {
1644 key = kzalloc(sizeof(*key), GFP_ATOMIC);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001645 if (!key)
1646 return -ENOMEM;
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001647 list_add(&key->list, &hdev->long_term_keys);
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001648 }
1649
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001650 bacpy(&key->bdaddr, bdaddr);
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001651 key->bdaddr_type = addr_type;
1652 memcpy(key->val, tk, sizeof(key->val));
1653 key->authenticated = authenticated;
1654 key->ediv = ediv;
1655 key->enc_size = enc_size;
1656 key->type = type;
1657 memcpy(key->rand, rand, sizeof(key->rand));
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001658
Vinicius Costa Gomesc9839a12012-02-02 21:08:01 -03001659 if (!new_key)
1660 return 0;
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001661
Vinicius Costa Gomes261cc5a2012-02-02 21:08:05 -03001662 if (type & HCI_SMP_LTK)
1663 mgmt_new_ltk(hdev, key, 1);
1664
Vinicius Costa Gomes75d262c2011-07-07 18:59:36 -03001665 return 0;
1666}
1667
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001668int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1669{
1670 struct link_key *key;
1671
1672 key = hci_find_link_key(hdev, bdaddr);
1673 if (!key)
1674 return -ENOENT;
1675
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001676 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02001677
1678 list_del(&key->list);
1679 kfree(key);
1680
1681 return 0;
1682}
1683
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001684int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1685{
1686 struct smp_ltk *k, *tmp;
1687
1688 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1689 if (bacmp(bdaddr, &k->bdaddr))
1690 continue;
1691
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001692 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03001693
1694 list_del(&k->list);
1695 kfree(k);
1696 }
1697
1698 return 0;
1699}
1700
Ville Tervo6bd32322011-02-16 16:32:41 +02001701/* HCI command timer function */
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001702static void hci_cmd_timeout(unsigned long arg)
Ville Tervo6bd32322011-02-16 16:32:41 +02001703{
1704 struct hci_dev *hdev = (void *) arg;
1705
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03001706 if (hdev->sent_cmd) {
1707 struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
1708 u16 opcode = __le16_to_cpu(sent->opcode);
1709
1710 BT_ERR("%s command 0x%4.4x tx timeout", hdev->name, opcode);
1711 } else {
1712 BT_ERR("%s command tx timeout", hdev->name);
1713 }
1714
Ville Tervo6bd32322011-02-16 16:32:41 +02001715 atomic_set(&hdev->cmd_cnt, 1);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02001716 queue_work(hdev->workqueue, &hdev->cmd_work);
Ville Tervo6bd32322011-02-16 16:32:41 +02001717}
1718
Szymon Janc2763eda2011-03-22 13:12:22 +01001719struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001720 bdaddr_t *bdaddr)
Szymon Janc2763eda2011-03-22 13:12:22 +01001721{
1722 struct oob_data *data;
1723
1724 list_for_each_entry(data, &hdev->remote_oob_data, list)
1725 if (bacmp(bdaddr, &data->bdaddr) == 0)
1726 return data;
1727
1728 return NULL;
1729}
1730
1731int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1732{
1733 struct oob_data *data;
1734
1735 data = hci_find_remote_oob_data(hdev, bdaddr);
1736 if (!data)
1737 return -ENOENT;
1738
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001739 BT_DBG("%s removing %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001740
1741 list_del(&data->list);
1742 kfree(data);
1743
1744 return 0;
1745}
1746
1747int hci_remote_oob_data_clear(struct hci_dev *hdev)
1748{
1749 struct oob_data *data, *n;
1750
1751 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1752 list_del(&data->list);
1753 kfree(data);
1754 }
1755
1756 return 0;
1757}
1758
1759int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001760 u8 *randomizer)
Szymon Janc2763eda2011-03-22 13:12:22 +01001761{
1762 struct oob_data *data;
1763
1764 data = hci_find_remote_oob_data(hdev, bdaddr);
1765
1766 if (!data) {
1767 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1768 if (!data)
1769 return -ENOMEM;
1770
1771 bacpy(&data->bdaddr, bdaddr);
1772 list_add(&data->list, &hdev->remote_oob_data);
1773 }
1774
1775 memcpy(data->hash, hash, sizeof(data->hash));
1776 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1777
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03001778 BT_DBG("%s for %pMR", hdev->name, bdaddr);
Szymon Janc2763eda2011-03-22 13:12:22 +01001779
1780 return 0;
1781}
1782
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001783struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001784{
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001785 struct bdaddr_list *b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001786
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02001787 list_for_each_entry(b, &hdev->blacklist, list)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001788 if (bacmp(bdaddr, &b->bdaddr) == 0)
1789 return b;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001790
1791 return NULL;
1792}
1793
1794int hci_blacklist_clear(struct hci_dev *hdev)
1795{
1796 struct list_head *p, *n;
1797
1798 list_for_each_safe(p, n, &hdev->blacklist) {
1799 struct bdaddr_list *b;
1800
1801 b = list_entry(p, struct bdaddr_list, list);
1802
1803 list_del(p);
1804 kfree(b);
1805 }
1806
1807 return 0;
1808}
1809
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001810int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001811{
1812 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001813
1814 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1815 return -EBADF;
1816
Antti Julku5e762442011-08-25 16:48:02 +03001817 if (hci_blacklist_lookup(hdev, bdaddr))
1818 return -EEXIST;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001819
1820 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
Antti Julku5e762442011-08-25 16:48:02 +03001821 if (!entry)
1822 return -ENOMEM;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001823
1824 bacpy(&entry->bdaddr, bdaddr);
1825
1826 list_add(&entry->list, &hdev->blacklist);
1827
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001828 return mgmt_device_blocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001829}
1830
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001831int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
Antti Julkub2a66aa2011-06-15 12:01:14 +03001832{
1833 struct bdaddr_list *entry;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001834
Szymon Janc1ec918c2011-11-16 09:32:21 +01001835 if (bacmp(bdaddr, BDADDR_ANY) == 0)
Antti Julku5e762442011-08-25 16:48:02 +03001836 return hci_blacklist_clear(hdev);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001837
1838 entry = hci_blacklist_lookup(hdev, bdaddr);
Szymon Janc1ec918c2011-11-16 09:32:21 +01001839 if (!entry)
Antti Julku5e762442011-08-25 16:48:02 +03001840 return -ENOENT;
Antti Julkub2a66aa2011-06-15 12:01:14 +03001841
1842 list_del(&entry->list);
1843 kfree(entry);
1844
Johan Hedberg88c1fe42012-02-09 15:56:11 +02001845 return mgmt_device_unblocked(hdev, bdaddr, type);
Antti Julkub2a66aa2011-06-15 12:01:14 +03001846}
1847
Johan Hedberg42c6b122013-03-05 20:37:49 +02001848static void le_scan_param_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001849{
1850 struct le_scan_params *param = (struct le_scan_params *) opt;
1851 struct hci_cp_le_set_scan_param cp;
1852
1853 memset(&cp, 0, sizeof(cp));
1854 cp.type = param->type;
1855 cp.interval = cpu_to_le16(param->interval);
1856 cp.window = cpu_to_le16(param->window);
1857
Johan Hedberg42c6b122013-03-05 20:37:49 +02001858 hci_req_add(req, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001859}
1860
Johan Hedberg42c6b122013-03-05 20:37:49 +02001861static void le_scan_enable_req(struct hci_request *req, unsigned long opt)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001862{
1863 struct hci_cp_le_set_scan_enable cp;
1864
1865 memset(&cp, 0, sizeof(cp));
1866 cp.enable = 1;
Andre Guedes0431a432012-05-31 20:01:41 -03001867 cp.filter_dup = 1;
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001868
Johan Hedberg42c6b122013-03-05 20:37:49 +02001869 hci_req_add(req, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001870}
1871
1872static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001873 u16 window, int timeout)
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001874{
1875 long timeo = msecs_to_jiffies(3000);
1876 struct le_scan_params param;
1877 int err;
1878
1879 BT_DBG("%s", hdev->name);
1880
1881 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1882 return -EINPROGRESS;
1883
1884 param.type = type;
1885 param.interval = interval;
1886 param.window = window;
1887
1888 hci_req_lock(hdev);
1889
Johan Hedberg01178cd2013-03-05 20:37:41 +02001890 err = __hci_req_sync(hdev, le_scan_param_req, (unsigned long) &param,
1891 timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001892 if (!err)
Johan Hedberg01178cd2013-03-05 20:37:41 +02001893 err = __hci_req_sync(hdev, le_scan_enable_req, 0, timeo);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001894
1895 hci_req_unlock(hdev);
1896
1897 if (err < 0)
1898 return err;
1899
Johan Hedberg46818ed2013-01-14 22:33:52 +02001900 queue_delayed_work(hdev->workqueue, &hdev->le_scan_disable,
1901 msecs_to_jiffies(timeout));
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001902
1903 return 0;
1904}
1905
Andre Guedes7dbfac12012-03-15 16:52:07 -03001906int hci_cancel_le_scan(struct hci_dev *hdev)
1907{
1908 BT_DBG("%s", hdev->name);
1909
1910 if (!test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1911 return -EALREADY;
1912
1913 if (cancel_delayed_work(&hdev->le_scan_disable)) {
1914 struct hci_cp_le_set_scan_enable cp;
1915
1916 /* Send HCI command to disable LE Scan */
1917 memset(&cp, 0, sizeof(cp));
1918 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1919 }
1920
1921 return 0;
1922}
1923
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001924static void le_scan_disable_work(struct work_struct *work)
1925{
1926 struct hci_dev *hdev = container_of(work, struct hci_dev,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001927 le_scan_disable.work);
Andre Guedes7ba8b4b2012-02-03 17:47:59 -03001928 struct hci_cp_le_set_scan_enable cp;
1929
1930 BT_DBG("%s", hdev->name);
1931
1932 memset(&cp, 0, sizeof(cp));
1933
1934 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1935}
1936
Andre Guedes28b75a82012-02-03 17:48:00 -03001937static void le_scan_work(struct work_struct *work)
1938{
1939 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1940 struct le_scan_params *param = &hdev->le_scan_params;
1941
1942 BT_DBG("%s", hdev->name);
1943
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001944 hci_do_le_scan(hdev, param->type, param->interval, param->window,
1945 param->timeout);
Andre Guedes28b75a82012-02-03 17:48:00 -03001946}
1947
1948int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03001949 int timeout)
Andre Guedes28b75a82012-02-03 17:48:00 -03001950{
1951 struct le_scan_params *param = &hdev->le_scan_params;
1952
1953 BT_DBG("%s", hdev->name);
1954
Johan Hedbergf1550472012-10-24 21:12:03 +03001955 if (test_bit(HCI_LE_PERIPHERAL, &hdev->dev_flags))
1956 return -ENOTSUPP;
1957
Andre Guedes28b75a82012-02-03 17:48:00 -03001958 if (work_busy(&hdev->le_scan))
1959 return -EINPROGRESS;
1960
1961 param->type = type;
1962 param->interval = interval;
1963 param->window = window;
1964 param->timeout = timeout;
1965
1966 queue_work(system_long_wq, &hdev->le_scan);
1967
1968 return 0;
1969}
1970
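/* Example sketch (illustrative only): starting a one-shot LE scan
 * through the helper above. Interval and window are in 0.625 ms units
 * (0x0010 == 10 ms) and the timeout is in milliseconds; the values
 * here are arbitrary examples, not recommended parameters.
 *
 *	int err = hci_le_scan(hdev, LE_SCAN_ACTIVE, 0x0010, 0x0010,
 *			      10000);
 *	if (err < 0)
 *		BT_ERR("Failed to start LE scan (%d)", err);
 */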
David Herrmann9be0dab2012-04-22 14:39:57 +02001971/* Alloc HCI device */
1972struct hci_dev *hci_alloc_dev(void)
1973{
1974 struct hci_dev *hdev;
1975
1976 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1977 if (!hdev)
1978 return NULL;
1979
David Herrmannb1b813d2012-04-22 14:39:58 +02001980 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1981 hdev->esco_type = (ESCO_HV1);
1982 hdev->link_mode = (HCI_LM_ACCEPT);
1983 hdev->io_capability = 0x03; /* No Input No Output */
Johan Hedbergbbaf4442012-11-08 01:22:59 +01001984 hdev->inq_tx_power = HCI_TX_POWER_INVALID;
1985 hdev->adv_tx_power = HCI_TX_POWER_INVALID;
David Herrmannb1b813d2012-04-22 14:39:58 +02001986
David Herrmannb1b813d2012-04-22 14:39:58 +02001987 hdev->sniff_max_interval = 800;
1988 hdev->sniff_min_interval = 80;
1989
1990 mutex_init(&hdev->lock);
1991 mutex_init(&hdev->req_lock);
1992
1993 INIT_LIST_HEAD(&hdev->mgmt_pending);
1994 INIT_LIST_HEAD(&hdev->blacklist);
1995 INIT_LIST_HEAD(&hdev->uuids);
1996 INIT_LIST_HEAD(&hdev->link_keys);
1997 INIT_LIST_HEAD(&hdev->long_term_keys);
1998 INIT_LIST_HEAD(&hdev->remote_oob_data);
Andrei Emeltchenko6b536b52012-08-31 16:39:28 +03001999 INIT_LIST_HEAD(&hdev->conn_hash.list);
David Herrmannb1b813d2012-04-22 14:39:58 +02002000
2001 INIT_WORK(&hdev->rx_work, hci_rx_work);
2002 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
2003 INIT_WORK(&hdev->tx_work, hci_tx_work);
2004 INIT_WORK(&hdev->power_on, hci_power_on);
2005 INIT_WORK(&hdev->le_scan, le_scan_work);
2006
David Herrmannb1b813d2012-04-22 14:39:58 +02002007 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
2008 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
2009 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
2010
David Herrmann9be0dab2012-04-22 14:39:57 +02002011 skb_queue_head_init(&hdev->driver_init);
David Herrmannb1b813d2012-04-22 14:39:58 +02002012 skb_queue_head_init(&hdev->rx_q);
2013 skb_queue_head_init(&hdev->cmd_q);
2014 skb_queue_head_init(&hdev->raw_q);
2015
2016 init_waitqueue_head(&hdev->req_wait_q);
2017
Andrei Emeltchenkobda4f232012-06-11 11:13:08 +03002018 setup_timer(&hdev->cmd_timer, hci_cmd_timeout, (unsigned long) hdev);
David Herrmannb1b813d2012-04-22 14:39:58 +02002019
David Herrmannb1b813d2012-04-22 14:39:58 +02002020 hci_init_sysfs(hdev);
2021 discovery_init(hdev);
David Herrmann9be0dab2012-04-22 14:39:57 +02002022
2023 return hdev;
2024}
2025EXPORT_SYMBOL(hci_alloc_dev);
2026
2027/* Free HCI device */
2028void hci_free_dev(struct hci_dev *hdev)
2029{
2030 skb_queue_purge(&hdev->driver_init);
2031
2032 /* will free via device release */
2033 put_device(&hdev->dev);
2034}
2035EXPORT_SYMBOL(hci_free_dev);
2036
Linus Torvalds1da177e2005-04-16 15:20:36 -07002037/* Register HCI device */
2038int hci_register_dev(struct hci_dev *hdev)
2039{
David Herrmannb1b813d2012-04-22 14:39:58 +02002040 int id, error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002041
David Herrmann010666a2012-01-07 15:47:07 +01002042 if (!hdev->open || !hdev->close)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002043 return -EINVAL;
2044
Mat Martineau08add512011-11-02 16:18:36 -07002045 /* Do not allow HCI_AMP devices to register at index 0,
2046 * so the index can be used as the AMP controller ID.
2047 */
Sasha Levin3df92b32012-05-27 22:36:56 +02002048 switch (hdev->dev_type) {
2049 case HCI_BREDR:
2050 id = ida_simple_get(&hci_index_ida, 0, 0, GFP_KERNEL);
2051 break;
2052 case HCI_AMP:
2053 id = ida_simple_get(&hci_index_ida, 1, 0, GFP_KERNEL);
2054 break;
2055 default:
2056 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002057 }
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002058
Sasha Levin3df92b32012-05-27 22:36:56 +02002059 if (id < 0)
2060 return id;
2061
Linus Torvalds1da177e2005-04-16 15:20:36 -07002062 sprintf(hdev->name, "hci%d", id);
2063 hdev->id = id;
Andrei Emeltchenko2d8b3a12012-04-16 16:32:04 +03002064
2065 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
2066
Sasha Levin3df92b32012-05-27 22:36:56 +02002067 write_lock(&hci_dev_list_lock);
2068 list_add(&hdev->list, &hci_dev_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002069 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002070
Gustavo F. Padovan32845eb2011-12-17 17:47:30 -02002071 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002072 WQ_MEM_RECLAIM, 1);
David Herrmann33ca9542011-10-08 14:58:49 +02002073 if (!hdev->workqueue) {
2074 error = -ENOMEM;
2075 goto err;
2076 }
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002077
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002078 hdev->req_workqueue = alloc_workqueue(hdev->name,
2079 WQ_HIGHPRI | WQ_UNBOUND |
2080 WQ_MEM_RECLAIM, 1);
2081 if (!hdev->req_workqueue) {
2082 destroy_workqueue(hdev->workqueue);
2083 error = -ENOMEM;
2084 goto err;
2085 }
2086
David Herrmann33ca9542011-10-08 14:58:49 +02002087 error = hci_add_sysfs(hdev);
2088 if (error < 0)
2089 goto err_wqueue;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002091 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002092 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops,
2093 hdev);
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002094 if (hdev->rfkill) {
2095 if (rfkill_register(hdev->rfkill) < 0) {
2096 rfkill_destroy(hdev->rfkill);
2097 hdev->rfkill = NULL;
2098 }
2099 }
2100
Johan Hedberga8b2d5c2012-01-08 23:11:15 +02002101 set_bit(HCI_SETUP, &hdev->dev_flags);
Andrei Emeltchenkoce2be9a2012-06-29 15:07:00 +03002102
2103 if (hdev->dev_type != HCI_AMP)
2104 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
2105
Linus Torvalds1da177e2005-04-16 15:20:36 -07002106 hci_notify(hdev, HCI_DEV_REG);
David Herrmanndc946bd2012-01-07 15:47:24 +01002107 hci_dev_hold(hdev);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002108
Johan Hedberg19202572013-01-14 22:33:51 +02002109 queue_work(hdev->req_workqueue, &hdev->power_on);
Marcel Holtmannfbe96d62012-10-30 01:35:40 -07002110
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 return id;
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002112
David Herrmann33ca9542011-10-08 14:58:49 +02002113err_wqueue:
2114 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002115 destroy_workqueue(hdev->req_workqueue);
David Herrmann33ca9542011-10-08 14:58:49 +02002116err:
Sasha Levin3df92b32012-05-27 22:36:56 +02002117 ida_simple_remove(&hci_index_ida, hdev->id);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002118 write_lock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002119 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002120 write_unlock(&hci_dev_list_lock);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002121
David Herrmann33ca9542011-10-08 14:58:49 +02002122 return error;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123}
2124EXPORT_SYMBOL(hci_register_dev);
2125
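/* Example sketch (illustrative only): the minimal driver-side
 * lifecycle around the registration path above. my_probe, my_open,
 * my_close and my_send are hypothetical transport callbacks; ->open
 * and ->close are checked at registration time, and ->send must be
 * set before any traffic flows.
 *
 *	static int my_probe(void)
 *	{
 *		struct hci_dev *hdev = hci_alloc_dev();
 *		if (!hdev)
 *			return -ENOMEM;
 *
 *		hdev->bus = HCI_USB;
 *		hdev->open = my_open;
 *		hdev->close = my_close;
 *		hdev->send = my_send;
 *
 *		if (hci_register_dev(hdev) < 0) {
 *			hci_free_dev(hdev);
 *			return -ENODEV;
 *		}
 *
 *		return 0;
 *	}
 */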
2126/* Unregister HCI device */
David Herrmann59735632011-10-26 10:43:19 +02002127void hci_unregister_dev(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128{
Sasha Levin3df92b32012-05-27 22:36:56 +02002129 int i, id;
Marcel Holtmannef222012007-07-11 06:42:04 +02002130
Marcel Holtmannc13854c2010-02-08 15:27:07 +01002131 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
Johan Hovold94324962012-03-15 14:48:41 +01002133 set_bit(HCI_UNREGISTER, &hdev->dev_flags);
2134
Sasha Levin3df92b32012-05-27 22:36:56 +02002135 id = hdev->id;
2136
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002137 write_lock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002138 list_del(&hdev->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002139 write_unlock(&hci_dev_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002140
2141 hci_dev_do_close(hdev);
2142
Suraj Sumangalacd4c5392010-07-14 13:02:16 +05302143 for (i = 0; i < NUM_REASSEMBLY; i++)
Marcel Holtmannef222012007-07-11 06:42:04 +02002144 kfree_skb(hdev->reassembly[i]);
2145
Gustavo Padovanb9b5ef12012-11-21 00:50:21 -02002146 cancel_work_sync(&hdev->power_on);
2147
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002148 if (!test_bit(HCI_INIT, &hdev->flags) &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002149 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002150 hci_dev_lock(hdev);
Johan Hedberg744cf192011-11-08 20:40:14 +02002151 mgmt_index_removed(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002152 hci_dev_unlock(hdev);
Johan Hedberg56e5cb82011-11-08 20:40:16 +02002153 }
Johan Hedbergab81cbf2010-12-15 13:53:18 +02002154
Johan Hedberg2e58ef32011-11-08 20:40:15 +02002155 /* mgmt_index_removed should take care of emptying the
2156 * pending list */
2157 BUG_ON(!list_empty(&hdev->mgmt_pending));
2158
Linus Torvalds1da177e2005-04-16 15:20:36 -07002159 hci_notify(hdev, HCI_DEV_UNREG);
2160
Marcel Holtmann611b30f2009-06-08 14:41:38 +02002161 if (hdev->rfkill) {
2162 rfkill_unregister(hdev->rfkill);
2163 rfkill_destroy(hdev->rfkill);
2164 }
2165
David Herrmannce242972011-10-08 14:58:48 +02002166 hci_del_sysfs(hdev);
Dave Young147e2d52008-03-05 18:45:59 -08002167
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002168 destroy_workqueue(hdev->workqueue);
Johan Hedberg6ead1bb2013-01-14 22:33:50 +02002169 destroy_workqueue(hdev->req_workqueue);
Marcel Holtmannf48fd9c2010-03-20 15:20:04 +01002170
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002171 hci_dev_lock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002172 hci_blacklist_clear(hdev);
Johan Hedberg2aeb9a12011-01-04 12:08:51 +02002173 hci_uuids_clear(hdev);
Johan Hedberg55ed8ca12011-01-17 14:41:05 +02002174 hci_link_keys_clear(hdev);
Vinicius Costa Gomesb899efa2012-02-02 21:08:00 -03002175 hci_smp_ltks_clear(hdev);
Szymon Janc2763eda2011-03-22 13:12:22 +01002176 hci_remote_oob_data_clear(hdev);
Gustavo F. Padovan09fd0de2011-06-17 13:03:21 -03002177 hci_dev_unlock(hdev);
Johan Hedberge2e0cac2011-01-04 12:08:50 +02002178
David Herrmanndc946bd2012-01-07 15:47:24 +01002179 hci_dev_put(hdev);
Sasha Levin3df92b32012-05-27 22:36:56 +02002180
2181 ida_simple_remove(&hci_index_ida, id);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182}
2183EXPORT_SYMBOL(hci_unregister_dev);
2184
2185/* Suspend HCI device */
2186int hci_suspend_dev(struct hci_dev *hdev)
2187{
2188 hci_notify(hdev, HCI_DEV_SUSPEND);
2189 return 0;
2190}
2191EXPORT_SYMBOL(hci_suspend_dev);
2192
2193/* Resume HCI device */
2194int hci_resume_dev(struct hci_dev *hdev)
2195{
2196 hci_notify(hdev, HCI_DEV_RESUME);
2197 return 0;
2198}
2199EXPORT_SYMBOL(hci_resume_dev);
2200
Marcel Holtmann76bca882009-11-18 00:40:39 +01002201/* Receive frame from HCI drivers */
2202int hci_recv_frame(struct sk_buff *skb)
2203{
2204 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2205 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002206 && !test_bit(HCI_INIT, &hdev->flags))) {
Marcel Holtmann76bca882009-11-18 00:40:39 +01002207 kfree_skb(skb);
2208 return -ENXIO;
2209 }
2210
Jorrit Schippersd82603c2012-12-27 17:33:02 +01002211 /* Incoming skb */
Marcel Holtmann76bca882009-11-18 00:40:39 +01002212 bt_cb(skb)->incoming = 1;
2213
2214 /* Time stamp */
2215 __net_timestamp(skb);
2216
Marcel Holtmann76bca882009-11-18 00:40:39 +01002217 skb_queue_tail(&hdev->rx_q, skb);
Marcel Holtmannb78752c2010-08-08 23:06:53 -04002218 queue_work(hdev->workqueue, &hdev->rx_work);
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002219
Marcel Holtmann76bca882009-11-18 00:40:39 +01002220 return 0;
2221}
2222EXPORT_SYMBOL(hci_recv_frame);
2223
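/* Example sketch (illustrative only): how a driver that already
 * receives complete packets hands one to the core. The skb must carry
 * the packet type in bt_cb() and the owning hdev in skb->dev, exactly
 * as hci_recv_frame() expects; my_hdev, data and len are hypothetical.
 *
 *	struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *	if (!skb)
 *		return -ENOMEM;
 *
 *	memcpy(skb_put(skb, len), data, len);
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) my_hdev;
 *
 *	return hci_recv_frame(skb);
 */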
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302224static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002225 int count, __u8 index)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302226{
2227 int len = 0;
2228 int hlen = 0;
2229 int remain = count;
2230 struct sk_buff *skb;
2231 struct bt_skb_cb *scb;
2232
2233 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002234 index >= NUM_REASSEMBLY)
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302235 return -EILSEQ;
2236
2237 skb = hdev->reassembly[index];
2238
2239 if (!skb) {
2240 switch (type) {
2241 case HCI_ACLDATA_PKT:
2242 len = HCI_MAX_FRAME_SIZE;
2243 hlen = HCI_ACL_HDR_SIZE;
2244 break;
2245 case HCI_EVENT_PKT:
2246 len = HCI_MAX_EVENT_SIZE;
2247 hlen = HCI_EVENT_HDR_SIZE;
2248 break;
2249 case HCI_SCODATA_PKT:
2250 len = HCI_MAX_SCO_SIZE;
2251 hlen = HCI_SCO_HDR_SIZE;
2252 break;
2253 }
2254
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002255 skb = bt_skb_alloc(len, GFP_ATOMIC);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302256 if (!skb)
2257 return -ENOMEM;
2258
2259 scb = (void *) skb->cb;
2260 scb->expect = hlen;
2261 scb->pkt_type = type;
2262
2263 skb->dev = (void *) hdev;
2264 hdev->reassembly[index] = skb;
2265 }
2266
2267 while (count) {
2268 scb = (void *) skb->cb;
Dan Carpenter89bb46d2012-02-28 09:57:59 +03002269 len = min_t(uint, scb->expect, count);
Suraj Sumangala33e882a2010-07-14 13:02:17 +05302270
2271 memcpy(skb_put(skb, len), data, len);
2272
2273 count -= len;
2274 data += len;
2275 scb->expect -= len;
2276 remain = count;
2277
2278 switch (type) {
2279 case HCI_EVENT_PKT:
2280 if (skb->len == HCI_EVENT_HDR_SIZE) {
2281 struct hci_event_hdr *h = hci_event_hdr(skb);
2282 scb->expect = h->plen;
2283
2284 if (skb_tailroom(skb) < scb->expect) {
2285 kfree_skb(skb);
2286 hdev->reassembly[index] = NULL;
2287 return -ENOMEM;
2288 }
2289 }
2290 break;
2291
2292 case HCI_ACLDATA_PKT:
2293 if (skb->len == HCI_ACL_HDR_SIZE) {
2294 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2295 scb->expect = __le16_to_cpu(h->dlen);
2296
2297 if (skb_tailroom(skb) < scb->expect) {
2298 kfree_skb(skb);
2299 hdev->reassembly[index] = NULL;
2300 return -ENOMEM;
2301 }
2302 }
2303 break;
2304
2305 case HCI_SCODATA_PKT:
2306 if (skb->len == HCI_SCO_HDR_SIZE) {
2307 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2308 scb->expect = h->dlen;
2309
2310 if (skb_tailroom(skb) < scb->expect) {
2311 kfree_skb(skb);
2312 hdev->reassembly[index] = NULL;
2313 return -ENOMEM;
2314 }
2315 }
2316 break;
2317 }
2318
2319 if (scb->expect == 0) {
2320 /* Complete frame */
2321
2322 bt_cb(skb)->pkt_type = type;
2323 hci_recv_frame(skb);
2324
2325 hdev->reassembly[index] = NULL;
2326 return remain;
2327 }
2328 }
2329
2330 return remain;
2331}
2332
Marcel Holtmannef222012007-07-11 06:42:04 +02002333int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2334{
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302335 int rem = 0;
2336
Marcel Holtmannef222012007-07-11 06:42:04 +02002337 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2338 return -EILSEQ;
2339
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002340 while (count) {
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002341 rem = hci_reassembly(hdev, type, data, count, type - 1);
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302342 if (rem < 0)
2343 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002344
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302345 data += (count - rem);
2346 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002347 }
Marcel Holtmannef222012007-07-11 06:42:04 +02002348
Suraj Sumangalaf39a3c02010-07-14 13:02:18 +05302349 return rem;
Marcel Holtmannef222012007-07-11 06:42:04 +02002350}
2351EXPORT_SYMBOL(hci_recv_fragment);
2352
Suraj Sumangala99811512010-07-14 13:02:19 +05302353#define STREAM_REASSEMBLY 0
2354
2355int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2356{
2357 int type;
2358 int rem = 0;
2359
Gustavo F. Padovanda5f6c32010-07-24 01:34:54 -03002360 while (count) {
Suraj Sumangala99811512010-07-14 13:02:19 +05302361 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2362
2363 if (!skb) {
2364 struct { char type; } *pkt;
2365
2366 /* Start of the frame */
2367 pkt = data;
2368 type = pkt->type;
2369
2370 data++;
2371 count--;
2372 } else
2373 type = bt_cb(skb)->pkt_type;
2374
Gustavo F. Padovan1e429f32011-04-04 18:25:14 -03002375 rem = hci_reassembly(hdev, type, data, count,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002376 STREAM_REASSEMBLY);
Suraj Sumangala99811512010-07-14 13:02:19 +05302377 if (rem < 0)
2378 return rem;
2379
2380 data += (count - rem);
2381 count = rem;
Joe Perchesf81c6222011-06-03 11:51:19 +00002382 }
Suraj Sumangala99811512010-07-14 13:02:19 +05302383
2384 return rem;
2385}
2386EXPORT_SYMBOL(hci_recv_stream_fragment);
2387
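/* Example sketch (illustrative only): a UART-style driver with no
 * framing of its own can push raw bytes here and let the core split
 * and reassemble them. The receive path my_uart_rx is hypothetical;
 * data and count come straight from the transport.
 *
 *	static void my_uart_rx(struct hci_dev *hdev, const u8 *data,
 *			       int count)
 *	{
 *		int err = hci_recv_stream_fragment(hdev, (void *) data,
 *						   count);
 *		if (err < 0)
 *			BT_ERR("Frame reassembly failed (%d)", err);
 *	}
 */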
Linus Torvalds1da177e2005-04-16 15:20:36 -07002388/* ---- Interface to upper protocols ---- */
2389
Linus Torvalds1da177e2005-04-16 15:20:36 -07002390int hci_register_cb(struct hci_cb *cb)
2391{
2392 BT_DBG("%p name %s", cb, cb->name);
2393
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002394 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002395 list_add(&cb->list, &hci_cb_list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002396 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002397
2398 return 0;
2399}
2400EXPORT_SYMBOL(hci_register_cb);
2401
2402int hci_unregister_cb(struct hci_cb *cb)
2403{
2404 BT_DBG("%p name %s", cb, cb->name);
2405
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002406 write_lock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002407 list_del(&cb->list);
Gustavo F. Padovanf20d09d2011-12-22 16:30:27 -02002408 write_unlock(&hci_cb_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002409
2410 return 0;
2411}
2412EXPORT_SYMBOL(hci_unregister_cb);
2413
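/* Example sketch (illustrative only): how an upper protocol hooks
 * these notifications. All callbacks are optional; only the ones the
 * protocol cares about need to be set, and my_security_cfm is
 * hypothetical.
 *
 *	static struct hci_cb my_cb = {
 *		.name		= "my_proto",
 *		.security_cfm	= my_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_cb);
 *	...
 *	hci_unregister_cb(&my_cb);
 */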
2414static int hci_send_frame(struct sk_buff *skb)
2415{
2416 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2417
2418 if (!hdev) {
2419 kfree_skb(skb);
2420 return -ENODEV;
2421 }
2422
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002423 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002424
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002425 /* Time stamp */
2426 __net_timestamp(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002427
Marcel Holtmanncd82e612012-02-20 20:34:38 +01002428 /* Send copy to monitor */
2429 hci_send_to_monitor(hdev, skb);
2430
2431 if (atomic_read(&hdev->promisc)) {
2432 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01002433 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002434 }
2435
2436 /* Get rid of skb owner, prior to sending to the driver. */
2437 skb_orphan(skb);
2438
2439 return hdev->send(skb);
2440}
2441
Johan Hedberg3119ae92013-03-05 20:37:44 +02002442void hci_req_init(struct hci_request *req, struct hci_dev *hdev)
2443{
2444 skb_queue_head_init(&req->cmd_q);
2445 req->hdev = hdev;
Andre Guedes5d73e032013-03-08 11:20:16 -03002446 req->err = 0;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002447}
2448
2449int hci_req_run(struct hci_request *req, hci_req_complete_t complete)
2450{
2451 struct hci_dev *hdev = req->hdev;
2452 struct sk_buff *skb;
2453 unsigned long flags;
2454
2455 BT_DBG("length %u", skb_queue_len(&req->cmd_q));
2456
Andre Guedes5d73e032013-03-08 11:20:16 -03002457 /* If an error occurred during request building, remove all HCI
2458 * commands queued on the HCI request queue.
2459 */
2460 if (req->err) {
2461 skb_queue_purge(&req->cmd_q);
2462 return req->err;
2463 }
2464
Johan Hedberg3119ae92013-03-05 20:37:44 +02002465 /* Do not allow empty requests */
2466 if (skb_queue_empty(&req->cmd_q))
Andre Guedes382b0c32013-03-08 11:20:14 -03002467 return -ENODATA;
Johan Hedberg3119ae92013-03-05 20:37:44 +02002468
2469 skb = skb_peek_tail(&req->cmd_q);
2470 bt_cb(skb)->req.complete = complete;
2471
2472 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
2473 skb_queue_splice_tail(&req->cmd_q, &hdev->cmd_q);
2474 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
2475
2476 queue_work(hdev->workqueue, &hdev->cmd_work);
2477
2478 return 0;
2479}
2480
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002481static struct sk_buff *hci_prepare_cmd(struct hci_dev *hdev, u16 opcode,
2482 u32 plen, void *param)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002483{
2484 int len = HCI_COMMAND_HDR_SIZE + plen;
2485 struct hci_command_hdr *hdr;
2486 struct sk_buff *skb;
2487
Linus Torvalds1da177e2005-04-16 15:20:36 -07002488 skb = bt_skb_alloc(len, GFP_ATOMIC);
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002489 if (!skb)
2490 return NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002491
2492 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002493 hdr->opcode = cpu_to_le16(opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002494 hdr->plen = plen;
2495
2496 if (plen)
2497 memcpy(skb_put(skb, plen), param, plen);
2498
2499 BT_DBG("skb len %d", skb->len);
2500
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002501 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002502 skb->dev = (void *) hdev;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002503
Johan Hedberg1ca3a9d2013-03-05 20:37:45 +02002504 return skb;
2505}
2506
2507/* Send HCI command */
2508int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2509{
2510 struct sk_buff *skb;
2511
2512 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2513
2514 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2515 if (!skb) {
2516 BT_ERR("%s no memory for command", hdev->name);
2517 return -ENOMEM;
2518 }
2519
Johan Hedberg11714b32013-03-05 20:37:47 +02002520 /* Stand-alone HCI commands must be flagged as
2521 * single-command requests.
2522 */
2523 bt_cb(skb)->req.start = true;
2524
Linus Torvalds1da177e2005-04-16 15:20:36 -07002525 skb_queue_tail(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02002526 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002527
2528 return 0;
2529}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002530
Johan Hedberg71c76a12013-03-05 20:37:46 +02002531/* Queue a command to an asynchronous HCI request */
2532int hci_req_add(struct hci_request *req, u16 opcode, u32 plen, void *param)
2533{
2534 struct hci_dev *hdev = req->hdev;
2535 struct sk_buff *skb;
2536
2537 BT_DBG("%s opcode 0x%4.4x plen %d", hdev->name, opcode, plen);
2538
2539 skb = hci_prepare_cmd(hdev, opcode, plen, param);
2540 if (!skb) {
Andre Guedes5d73e032013-03-08 11:20:16 -03002541 BT_ERR("%s no memory for command (opcode 0x%4.4x)",
2542 hdev->name, opcode);
2543 req->err = -ENOMEM;
Johan Hedberg71c76a12013-03-05 20:37:46 +02002544 return -ENOMEM;
2545 }
2546
2547 if (skb_queue_empty(&req->cmd_q))
2548 bt_cb(skb)->req.start = true;
2549
2550 skb_queue_tail(&req->cmd_q, skb);
2551
2552 return 0;
2553}
2554
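/* Example sketch (illustrative only): batching two commands with the
 * request helpers above. my_complete is a hypothetical
 * hci_req_complete_t callback; both commands are spliced onto
 * hdev->cmd_q atomically and my_complete runs once for the whole
 * request.
 *
 *	struct hci_request req;
 *	u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *	int err;
 *
 *	hci_req_init(&req, hdev);
 *	hci_req_add(&req, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
 *	hci_req_add(&req, HCI_OP_READ_LOCAL_NAME, 0, NULL);
 *	err = hci_req_run(&req, my_complete);
 */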
Linus Torvalds1da177e2005-04-16 15:20:36 -07002555/* Get data from the previously sent command */
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002556void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002557{
2558 struct hci_command_hdr *hdr;
2559
2560 if (!hdev->sent_cmd)
2561 return NULL;
2562
2563 hdr = (void *) hdev->sent_cmd->data;
2564
Marcel Holtmanna9de9242007-10-20 13:33:56 +02002565 if (hdr->opcode != cpu_to_le16(opcode))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002566 return NULL;
2567
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002568 BT_DBG("%s opcode 0x%4.4x", hdev->name, opcode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002569
2570 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2571}
2572
2573/* Send ACL data */
2574static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2575{
2576 struct hci_acl_hdr *hdr;
2577 int len = skb->len;
2578
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002579 skb_push(skb, HCI_ACL_HDR_SIZE);
2580 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002581 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002582 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2583 hdr->dlen = cpu_to_le16(len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002584}
2585
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002586static void hci_queue_acl(struct hci_chan *chan, struct sk_buff_head *queue,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002587 struct sk_buff *skb, __u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002588{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002589 struct hci_conn *conn = chan->conn;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002590 struct hci_dev *hdev = conn->hdev;
2591 struct sk_buff *list;
2592
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002593 skb->len = skb_headlen(skb);
2594 skb->data_len = 0;
2595
2596 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenko204a6e52012-10-15 11:58:39 +03002597
2598 switch (hdev->dev_type) {
2599 case HCI_BREDR:
2600 hci_add_acl_hdr(skb, conn->handle, flags);
2601 break;
2602 case HCI_AMP:
2603 hci_add_acl_hdr(skb, chan->handle, flags);
2604 break;
2605 default:
2606 BT_ERR("%s unknown dev_type %d", hdev->name, hdev->dev_type);
2607 return;
2608 }
Gustavo Padovan087bfd92012-05-11 13:16:11 -03002609
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02002610 list = skb_shinfo(skb)->frag_list;
2611 if (!list) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002612 /* Non-fragmented */
2613 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2614
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002615 skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002616 } else {
2617 /* Fragmented */
2618 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2619
2620 skb_shinfo(skb)->frag_list = NULL;
2621
2622 /* Queue all fragments atomically */
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002623 spin_lock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002624
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002625 __skb_queue_tail(queue, skb);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002626
2627 flags &= ~ACL_START;
2628 flags |= ACL_CONT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002629 do {
2630 skb = list; list = list->next;
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002631
Linus Torvalds1da177e2005-04-16 15:20:36 -07002632 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002633 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +02002634 hci_add_acl_hdr(skb, conn->handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002635
2636 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2637
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002638 __skb_queue_tail(queue, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002639 } while (list);
2640
Gustavo F. Padovanaf3e6352011-12-22 16:35:05 -02002641 spin_unlock(&queue->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002642 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002643}
2644
2645void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2646{
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002647 struct hci_dev *hdev = chan->conn->hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002648
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03002649 BT_DBG("%s chan %p flags 0x%4.4x", hdev->name, chan, flags);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002650
2651 skb->dev = (void *) hdev;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002652
Andrei Emeltchenkoee22be72012-09-21 12:30:04 +03002653 hci_queue_acl(chan, &chan->data_q, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002654
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002655 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002656}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002657
2658/* Send SCO data */
Gustavo F. Padovan0d861d82010-05-01 16:15:35 -03002659void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002660{
2661 struct hci_dev *hdev = conn->hdev;
2662 struct hci_sco_hdr hdr;
2663
2664 BT_DBG("%s len %d", hdev->name, skb->len);
2665
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002666 hdr.handle = cpu_to_le16(conn->handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002667 hdr.dlen = skb->len;
2668
Arnaldo Carvalho de Melobadff6d2007-03-13 13:06:52 -03002669 skb_push(skb, HCI_SCO_HDR_SIZE);
2670 skb_reset_transport_header(skb);
Arnaldo Carvalho de Melo9c702202007-04-25 18:04:18 -07002671 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002672
2673 skb->dev = (void *) hdev;
Marcel Holtmann0d48d932005-08-09 20:30:28 -07002674 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
Marcel Holtmannc78ae282009-11-18 01:02:54 +01002675
Linus Torvalds1da177e2005-04-16 15:20:36 -07002676 skb_queue_tail(&conn->data_q, skb);
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02002677 queue_work(hdev->workqueue, &hdev->tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002679
2680/* ---- HCI TX task (outgoing data) ---- */
2681
2682/* HCI Connection scheduler */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002683static struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type,
2684 int *quote)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002685{
2686 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002687 struct hci_conn *conn = NULL, *c;
Mikel Astizabc5de82012-04-11 08:48:47 +02002688 unsigned int num = 0, min = ~0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09002690 /* We don't have to lock device here. Connections are always
Linus Torvalds1da177e2005-04-16 15:20:36 -07002691 * added and removed with TX task disabled. */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002692
2693 rcu_read_lock();
2694
2695 list_for_each_entry_rcu(c, &h->list, list) {
Marcel Holtmann769be972008-07-14 20:13:49 +02002696 if (c->type != type || skb_queue_empty(&c->data_q))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 continue;
Marcel Holtmann769be972008-07-14 20:13:49 +02002698
2699 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2700 continue;
2701
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 num++;
2703
2704 if (c->sent < min) {
2705 min = c->sent;
2706 conn = c;
2707 }
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03002708
2709 if (hci_conn_num(hdev, type) == num)
2710 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002711 }
2712
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002713 rcu_read_unlock();
2714
Linus Torvalds1da177e2005-04-16 15:20:36 -07002715 if (conn) {
Ville Tervo6ed58ec2011-02-10 22:38:48 -03002716 int cnt, q;
2717
2718 switch (conn->type) {
2719 case ACL_LINK:
2720 cnt = hdev->acl_cnt;
2721 break;
2722 case SCO_LINK:
2723 case ESCO_LINK:
2724 cnt = hdev->sco_cnt;
2725 break;
2726 case LE_LINK:
2727 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2728 break;
2729 default:
2730 cnt = 0;
2731 BT_ERR("Unknown link type");
2732 }
2733
2734 q = cnt / num;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002735 *quote = q ? q : 1;
2736 } else
2737 *quote = 0;
2738
2739 BT_DBG("conn %p quote %d", conn, *quote);
2740 return conn;
2741}
2742
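/* Worked example for the quota above (illustrative only): with
 * hdev->acl_cnt == 8 free ACL buffers and num == 3 ACL connections
 * holding queued data, the least-recently-served connection is picked
 * and granted a quote of 8 / 3 == 2 packets; when cnt / num rounds
 * down to 0, the winner still gets 1 so the scheduler always makes
 * progress.
 */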
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002743static void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002744{
2745 struct hci_conn_hash *h = &hdev->conn_hash;
Luiz Augusto von Dentz8035ded2011-11-01 10:58:56 +02002746 struct hci_conn *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002747
Ville Tervobae1f5d92011-02-10 22:38:53 -03002748 BT_ERR("%s link tx timeout", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002750 rcu_read_lock();
2751
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752 /* Kill stalled connections */
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002753 list_for_each_entry_rcu(c, &h->list, list) {
Ville Tervobae1f5d92011-02-10 22:38:53 -03002754 if (c->type == type && c->sent) {
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03002755 BT_ERR("%s killing stalled connection %pMR",
2756 hdev->name, &c->dst);
Andre Guedesbed71742013-01-30 11:50:56 -03002757 hci_disconnect(c, HCI_ERROR_REMOTE_USER_TERM);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002758 }
2759 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002760
2761 rcu_read_unlock();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002762}
2763
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002764static struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2765 int *quote)
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002766{
2767 struct hci_conn_hash *h = &hdev->conn_hash;
2768 struct hci_chan *chan = NULL;
Mikel Astizabc5de82012-04-11 08:48:47 +02002769 unsigned int num = 0, min = ~0, cur_prio = 0;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002770 struct hci_conn *conn;
2771 int cnt, q, conn_num = 0;
2772
2773 BT_DBG("%s", hdev->name);
2774
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002775 rcu_read_lock();
2776
2777 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002778 struct hci_chan *tmp;
2779
2780 if (conn->type != type)
2781 continue;
2782
2783 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2784 continue;
2785
2786 conn_num++;
2787
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002788 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002789 struct sk_buff *skb;
2790
2791 if (skb_queue_empty(&tmp->data_q))
2792 continue;
2793
2794 skb = skb_peek(&tmp->data_q);
2795 if (skb->priority < cur_prio)
2796 continue;
2797
2798 if (skb->priority > cur_prio) {
2799 num = 0;
2800 min = ~0;
2801 cur_prio = skb->priority;
2802 }
2803
2804 num++;
2805
2806 if (conn->sent < min) {
2807 min = conn->sent;
2808 chan = tmp;
2809 }
2810 }
2811
2812 if (hci_conn_num(hdev, type) == conn_num)
2813 break;
2814 }
2815
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002816 rcu_read_unlock();
2817
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002818 if (!chan)
2819 return NULL;
2820
2821 switch (chan->conn->type) {
2822 case ACL_LINK:
2823 cnt = hdev->acl_cnt;
2824 break;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002825 case AMP_LINK:
2826 cnt = hdev->block_cnt;
2827 break;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002828 case SCO_LINK:
2829 case ESCO_LINK:
2830 cnt = hdev->sco_cnt;
2831 break;
2832 case LE_LINK:
2833 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2834 break;
2835 default:
2836 cnt = 0;
2837 BT_ERR("Unknown link type");
2838 }
2839
2840 q = cnt / num;
2841 *quote = q ? q : 1;
2842 BT_DBG("chan %p quote %d", chan, *quote);
2843 return chan;
2844}
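/* Channel selection here is two-level: only channels whose head skb
 * carries the highest priority currently queued for this link type
 * compete, and among those the channel on the least-busy connection
 * (lowest conn->sent) wins.  The quota is then cnt / num, with the
 * same minimum of one packet as in hci_low_sent().
 */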
2845
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002846static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2847{
2848 struct hci_conn_hash *h = &hdev->conn_hash;
2849 struct hci_conn *conn;
2850 int num = 0;
2851
2852 BT_DBG("%s", hdev->name);
2853
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002854 rcu_read_lock();
2855
2856 list_for_each_entry_rcu(conn, &h->list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002857 struct hci_chan *chan;
2858
2859 if (conn->type != type)
2860 continue;
2861
2862 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2863 continue;
2864
2865 num++;
2866
Gustavo F. Padovan8192ede2011-12-14 15:08:48 -02002867 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002868 struct sk_buff *skb;
2869
2870 if (chan->sent) {
2871 chan->sent = 0;
2872 continue;
2873 }
2874
2875 if (skb_queue_empty(&chan->data_q))
2876 continue;
2877
2878 skb = skb_peek(&chan->data_q);
2879 if (skb->priority >= HCI_PRIO_MAX - 1)
2880 continue;
2881
2882 skb->priority = HCI_PRIO_MAX - 1;
2883
2884 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002885 skb->priority);
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002886 }
2887
2888 if (hci_conn_num(hdev, type) == num)
2889 break;
2890 }
Gustavo F. Padovanbf4c6322011-12-14 22:54:12 -02002891
2892 rcu_read_unlock();
2893
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002894}
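/* This is the anti-starvation pass: a channel that sent nothing in the
 * last round gets its head skb promoted to HCI_PRIO_MAX - 1, so the
 * next hci_chan_sent() pass cannot keep skipping it, while a channel
 * that did send merely has chan->sent cleared and must compete again
 * at its native priority.
 */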
2895
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002896static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2897{
2898 /* Calculate count of blocks used by this packet */
2899 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2900}
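/* E.g. with a hypothetical hdev->block_len of 256 bytes, an skb whose
 * ACL payload is 1000 bytes (skb->len minus the 4-byte ACL header)
 * costs DIV_ROUND_UP(1000, 256) == 4 data blocks.
 */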
2901
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002902static void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002903{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002904 if (!test_bit(HCI_RAW, &hdev->flags)) {
2905 /* ACL tx timeout must be longer than maximum
2906 * link supervision timeout (40.9 seconds) */
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002907 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03002908 HCI_ACL_TX_TIMEOUT))
Ville Tervobae1f5d92011-02-10 22:38:53 -03002909 hci_link_tx_to(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002910 }
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002911}
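/* The watchdog only fires when the controller has reported no free
 * buffers (cnt == 0) for the whole HCI_ACL_TX_TIMEOUT window, which
 * must exceed the 40.9 second maximum link supervision timeout noted
 * above; the LE path in hci_sched_le() hard-codes the equivalent
 * HZ * 45 check.
 */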
Linus Torvalds1da177e2005-04-16 15:20:36 -07002912
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002913static void hci_sched_acl_pkt(struct hci_dev *hdev)
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002914{
2915 unsigned int cnt = hdev->acl_cnt;
2916 struct hci_chan *chan;
2917 struct sk_buff *skb;
2918 int quote;
2919
2920 __check_timeout(hdev, cnt);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002921
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002922 while (hdev->acl_cnt &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002923 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002924 u32 priority = (skb_peek(&chan->data_q))->priority;
2925 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002926 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002927 skb->len, skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002928
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02002929 /* Stop if priority has changed */
2930 if (skb->priority < priority)
2931 break;
2932
2933 skb = skb_dequeue(&chan->data_q);
2934
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002935 hci_conn_enter_active_mode(chan->conn,
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002936 bt_cb(skb)->force_active);
Marcel Holtmann04837f62006-07-03 10:02:33 +02002937
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 hci_send_frame(skb);
2939 hdev->acl_last_tx = jiffies;
2940
2941 hdev->acl_cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02002942 chan->sent++;
2943 chan->conn->sent++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002944 }
2945 }
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02002946
2947 if (cnt != hdev->acl_cnt)
2948 hci_prio_recalculate(hdev, ACL_LINK);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949}
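/* The "Stop if priority has changed" break means a quota round drains
 * only skbs whose priority is at least the level the round started
 * with; once a lower-priority skb reaches the head of the queue, the
 * channel must re-compete in the next hci_chan_sent() pass.
 */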
2950
Gustavo Padovan6039aa732012-05-23 04:04:18 -03002951static void hci_sched_acl_blk(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002952{
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002953 unsigned int cnt = hdev->block_cnt;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002954 struct hci_chan *chan;
2955 struct sk_buff *skb;
2956 int quote;
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002957 u8 type;
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002958
Andrei Emeltchenko63d2bc12012-02-03 16:27:55 +02002959 __check_timeout(hdev, cnt);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002960
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002961 BT_DBG("%s", hdev->name);
2962
2963 if (hdev->dev_type == HCI_AMP)
2964 type = AMP_LINK;
2965 else
2966 type = ACL_LINK;
2967
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002968 while (hdev->block_cnt > 0 &&
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03002969 (chan = hci_chan_sent(hdev, type, &quote))) {
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002970 u32 priority = (skb_peek(&chan->data_q))->priority;
2971 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2972 int blocks;
2973
2974 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002975 skb->len, skb->priority);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002976
2977 /* Stop if priority has changed */
2978 if (skb->priority < priority)
2979 break;
2980
2981 blocks = __get_blocks(hdev, skb);
2982 if (blocks > hdev->block_cnt)
2983 return;
2984
2985 skb = skb_dequeue(&chan->data_q);
2986
2987 hci_conn_enter_active_mode(chan->conn,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03002988 bt_cb(skb)->force_active);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02002989
2990 hci_send_frame(skb);
2991 hdev->acl_last_tx = jiffies;
2992
2993 hdev->block_cnt -= blocks;
2994 quote -= blocks;
2995
2996 chan->sent += blocks;
2997 chan->conn->sent += blocks;
2998 }
2999 }
3000
3001 if (cnt != hdev->block_cnt)
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003002 hci_prio_recalculate(hdev, type);
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003003}
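/* Blocks are charged against both the shared hdev->block_cnt and the
 * per-round quote; checking __get_blocks() before dequeuing keeps an
 * skb that no longer fits queued for a later round instead of losing
 * it.
 */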
3004
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003005static void hci_sched_acl(struct hci_dev *hdev)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003006{
3007 BT_DBG("%s", hdev->name);
3008
Andrei Emeltchenkobd1eb662012-10-10 17:38:30 +03003009 /* Nothing to schedule if a BR/EDR controller has no ACL links */
3010 if (!hci_conn_num(hdev, ACL_LINK) && hdev->dev_type == HCI_BREDR)
3011 return;
3012
3013 /* Nothing to schedule if an AMP controller has no AMP links */
3014 if (!hci_conn_num(hdev, AMP_LINK) && hdev->dev_type == HCI_AMP)
Andrei Emeltchenkob71d3852012-02-03 16:27:54 +02003015 return;
3016
3017 switch (hdev->flow_ctl_mode) {
3018 case HCI_FLOW_CTL_MODE_PACKET_BASED:
3019 hci_sched_acl_pkt(hdev);
3020 break;
3021
3022 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
3023 hci_sched_acl_blk(hdev);
3024 break;
3025 }
3026}
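/* The flow control mode is a fixed property of the controller:
 * packet-based accounting (the BR/EDR default) spends one credit per
 * ACL packet, while block-based accounting (data block flow control,
 * as used by AMP controllers) charges __get_blocks() blocks per
 * packet.
 */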
3027
Linus Torvalds1da177e2005-04-16 15:20:36 -07003028/* Schedule SCO */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003029static void hci_sched_sco(struct hci_dev *hdev)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003030{
3031 struct hci_conn *conn;
3032 struct sk_buff *skb;
3033 int quote;
3034
3035 BT_DBG("%s", hdev->name);
3036
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003037 if (!hci_conn_num(hdev, SCO_LINK))
3038 return;
3039
Linus Torvalds1da177e2005-04-16 15:20:36 -07003040 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
3041 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3042 BT_DBG("skb %p len %d", skb, skb->len);
3043 hci_send_frame(skb);
3044
3045 conn->sent++;
3046 if (conn->sent == ~0)
3047 conn->sent = 0;
3048 }
3049 }
3050}
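/* Note that hdev->sco_cnt is checked but never decremented here;
 * conn->sent is just a wrapping tally, reset once it reaches ~0.
 */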
3051
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003052static void hci_sched_esco(struct hci_dev *hdev)
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003053{
3054 struct hci_conn *conn;
3055 struct sk_buff *skb;
3056 int quote;
3057
3058 BT_DBG("%s", hdev->name);
3059
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003060 if (!hci_conn_num(hdev, ESCO_LINK))
3061 return;
3062
Gustavo Padovan8fc9ced2012-05-23 04:04:21 -03003063 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK,
3064 &quote))) {
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003065 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
3066 BT_DBG("skb %p len %d", skb, skb->len);
3067 hci_send_frame(skb);
3068
3069 conn->sent++;
3070 if (conn->sent == ~0)
3071 conn->sent = 0;
3072 }
3073 }
3074}
3075
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003076static void hci_sched_le(struct hci_dev *hdev)
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003077{
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003078 struct hci_chan *chan;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003079 struct sk_buff *skb;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003080 int quote, cnt, tmp;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003081
3082 BT_DBG("%s", hdev->name);
3083
Luiz Augusto von Dentz52087a72011-08-17 16:23:00 +03003084 if (!hci_conn_num(hdev, LE_LINK))
3085 return;
3086
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003087 if (!test_bit(HCI_RAW, &hdev->flags)) {
3088 /* LE tx timeout must be longer than maximum
3089 * link supervision timeout (40.9 seconds) */
Ville Tervobae1f5d92011-02-10 22:38:53 -03003090 if (!hdev->le_cnt && hdev->le_pkts &&
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003091 time_after(jiffies, hdev->le_last_tx + HZ * 45))
Ville Tervobae1f5d92011-02-10 22:38:53 -03003092 hci_link_tx_to(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003093 }
3094
3095 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003096 tmp = cnt;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003097 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003098 u32 priority = (skb_peek(&chan->data_q))->priority;
3099 while (quote-- && (skb = skb_peek(&chan->data_q))) {
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003100 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003101 skb->len, skb->priority);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003102
Luiz Augusto von Dentzec1cce22011-11-02 15:52:02 +02003103 /* Stop if priority has changed */
3104 if (skb->priority < priority)
3105 break;
3106
3107 skb = skb_dequeue(&chan->data_q);
3108
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003109 hci_send_frame(skb);
3110 hdev->le_last_tx = jiffies;
3111
3112 cnt--;
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003113 chan->sent++;
3114 chan->conn->sent++;
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003115 }
3116 }
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02003117
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003118 if (hdev->le_pkts)
3119 hdev->le_cnt = cnt;
3120 else
3121 hdev->acl_cnt = cnt;
Luiz Augusto von Dentz02b20f02011-11-02 15:52:03 +02003122
3123 if (cnt != tmp)
3124 hci_prio_recalculate(hdev, LE_LINK);
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003125}
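/* Controllers without a dedicated LE buffer pool advertise
 * le_pkts == 0, in which case LE traffic spends ACL credits; that is
 * why cnt is chosen as it is above and written back into either
 * le_cnt or acl_cnt afterwards.
 */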
3126
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003127static void hci_tx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003128{
Gustavo F. Padovan3eff45e2011-12-15 00:50:02 -02003129 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003130 struct sk_buff *skb;
3131
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003132 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003133 hdev->sco_cnt, hdev->le_cnt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003134
3135 /* Schedule queues and send stuff to HCI driver */
3136
3137 hci_sched_acl(hdev);
3138
3139 hci_sched_sco(hdev);
3140
Marcel Holtmannb6a0dc82007-10-20 14:55:10 +02003141 hci_sched_esco(hdev);
3142
Ville Tervo6ed58ec2011-02-10 22:38:48 -03003143 hci_sched_le(hdev);
3144
Linus Torvalds1da177e2005-04-16 15:20:36 -07003145 /* Send any queued raw (unknown type) packets */
3146 while ((skb = skb_dequeue(&hdev->raw_q)))
3147 hci_send_frame(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003148}
3149
Lucas De Marchi25985ed2011-03-30 22:57:33 -03003150/* ----- HCI RX task (incoming data processing) ----- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151
3152/* ACL data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003153static void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154{
3155 struct hci_acl_hdr *hdr = (void *) skb->data;
3156 struct hci_conn *conn;
3157 __u16 handle, flags;
3158
3159 skb_pull(skb, HCI_ACL_HDR_SIZE);
3160
3161 handle = __le16_to_cpu(hdr->handle);
3162 flags = hci_flags(handle);
3163 handle = hci_handle(handle);
3164
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003165 BT_DBG("%s len %d handle 0x%4.4x flags 0x%4.4x", hdev->name, skb->len,
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003166 handle, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003167
3168 hdev->stat.acl_rx++;
3169
3170 hci_dev_lock(hdev);
3171 conn = hci_conn_hash_lookup_handle(hdev, handle);
3172 hci_dev_unlock(hdev);
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003173
Linus Torvalds1da177e2005-04-16 15:20:36 -07003174 if (conn) {
Mat Martineau65983fc2011-12-13 15:06:02 -08003175 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
Marcel Holtmann04837f62006-07-03 10:02:33 +02003176
Linus Torvalds1da177e2005-04-16 15:20:36 -07003177 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003178 l2cap_recv_acldata(conn, skb, flags);
3179 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003180 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003181 BT_ERR("%s ACL packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003182 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003183 }
3184
3185 kfree_skb(skb);
3186}
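/* The 16-bit handle field of the ACL header packs a 12-bit connection
 * handle together with the packet boundary and broadcast flag bits;
 * hci_handle() and hci_flags() above split the two apart.
 */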
3187
3188/* SCO data packet */
Gustavo Padovan6039aa732012-05-23 04:04:18 -03003189static void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003190{
3191 struct hci_sco_hdr *hdr = (void *) skb->data;
3192 struct hci_conn *conn;
3193 __u16 handle;
3194
3195 skb_pull(skb, HCI_SCO_HDR_SIZE);
3196
3197 handle = __le16_to_cpu(hdr->handle);
3198
Andrei Emeltchenkof0e09512012-06-11 11:13:09 +03003199 BT_DBG("%s len %d handle 0x%4.4x", hdev->name, skb->len, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003200
3201 hdev->stat.sco_rx++;
3202
3203 hci_dev_lock(hdev);
3204 conn = hci_conn_hash_lookup_handle(hdev, handle);
3205 hci_dev_unlock(hdev);
3206
3207 if (conn) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003208 /* Send to upper protocol */
Ulisses Furquim686ebf22011-12-21 10:11:33 -02003209 sco_recv_scodata(conn, skb);
3210 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211 } else {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003212 BT_ERR("%s SCO packet for unknown connection handle %d",
Gustavo Padovana8c5fb12012-05-17 00:36:26 -03003213 hdev->name, handle);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214 }
3215
3216 kfree_skb(skb);
3217}
3218
Johan Hedberg9238f362013-03-05 20:37:48 +02003219static bool hci_req_is_complete(struct hci_dev *hdev)
3220{
3221 struct sk_buff *skb;
3222
3223 skb = skb_peek(&hdev->cmd_q);
3224 if (!skb)
3225 return true;
3226
3227 return bt_cb(skb)->req.start;
3228}
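/* Commands are chained into requests and bt_cb(skb)->req.start marks
 * the first command of each request.  If the head of cmd_q starts a
 * new request (or the queue is empty), the current request has no
 * commands left to send.
 */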
3229
Johan Hedberg42c6b122013-03-05 20:37:49 +02003230static void hci_resend_last(struct hci_dev *hdev)
3231{
3232 struct hci_command_hdr *sent;
3233 struct sk_buff *skb;
3234 u16 opcode;
3235
3236 if (!hdev->sent_cmd)
3237 return;
3238
3239 sent = (void *) hdev->sent_cmd->data;
3240 opcode = __le16_to_cpu(sent->opcode);
3241 if (opcode == HCI_OP_RESET)
3242 return;
3243
3244 skb = skb_clone(hdev->sent_cmd, GFP_KERNEL);
3245 if (!skb)
3246 return;
3247
3248 skb_queue_head(&hdev->cmd_q, skb);
3249 queue_work(hdev->workqueue, &hdev->cmd_work);
3250}
3251
Johan Hedberg9238f362013-03-05 20:37:48 +02003252void hci_req_cmd_complete(struct hci_dev *hdev, u16 opcode, u8 status)
3253{
3254 hci_req_complete_t req_complete = NULL;
3255 struct sk_buff *skb;
3256 unsigned long flags;
3257
3258 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3259
Johan Hedberg42c6b122013-03-05 20:37:49 +02003260 /* If the completed command doesn't match the last one that was
3261 * sent, we need to do special handling of it.
Johan Hedberg9238f362013-03-05 20:37:48 +02003262 */
Johan Hedberg42c6b122013-03-05 20:37:49 +02003263 if (!hci_sent_cmd_data(hdev, opcode)) {
3264 /* Some CSR-based controllers generate a spontaneous
3265 * reset complete event during init and any pending
3266 * command will never be completed. In such a case we
3267 * need to resend whatever was the last sent
3268 * command.
3269 */
3270 if (test_bit(HCI_INIT, &hdev->flags) && opcode == HCI_OP_RESET)
3271 hci_resend_last(hdev);
3272
Johan Hedberg9238f362013-03-05 20:37:48 +02003273 return;
Johan Hedberg42c6b122013-03-05 20:37:49 +02003274 }
Johan Hedberg9238f362013-03-05 20:37:48 +02003275
3276 /* If the command succeeded and there are still more commands in
3277 * this request, the request is not yet complete.
3278 */
3279 if (!status && !hci_req_is_complete(hdev))
3280 return;
3281
3282 /* If this was the last command in a request, the complete
3283 * callback would be found in hdev->sent_cmd instead of the
3284 * command queue (hdev->cmd_q).
3285 */
3286 if (hdev->sent_cmd) {
3287 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3288 if (req_complete)
3289 goto call_complete;
3290 }
3291
3292 /* Remove all pending commands belonging to this request */
3293 spin_lock_irqsave(&hdev->cmd_q.lock, flags);
3294 while ((skb = __skb_dequeue(&hdev->cmd_q))) {
3295 if (bt_cb(skb)->req.start) {
3296 __skb_queue_head(&hdev->cmd_q, skb);
3297 break;
3298 }
3299
3300 req_complete = bt_cb(skb)->req.complete;
3301 kfree_skb(skb);
3302 }
3303 spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
3304
3305call_complete:
3306 if (req_complete)
3307 req_complete(hdev, status);
3308}
3309
3310void hci_req_cmd_status(struct hci_dev *hdev, u16 opcode, u8 status)
3311{
3312 hci_req_complete_t req_complete = NULL;
3313
3314 BT_DBG("opcode 0x%04x status 0x%02x", opcode, status);
3315
3316 if (status) {
3317 hci_req_cmd_complete(hdev, opcode, status);
3318 return;
3319 }
3320
3321 /* No need to handle success status if there are more commands */
3322 if (!hci_req_is_complete(hdev))
3323 return;
3324
3325 if (hdev->sent_cmd)
3326 req_complete = bt_cb(hdev->sent_cmd)->req.complete;
3327
3328 /* If the request doesn't have a complete callback or there
3329 * are other commands/requests in the hdev queue, we consider
3330 * this request completed.
3331 */
3332 if (!req_complete || !skb_queue_empty(&hdev->cmd_q))
3333 hci_req_cmd_complete(hdev, opcode, status);
3334}
3335
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003336static void hci_rx_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003337{
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003338 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003339 struct sk_buff *skb;
3340
3341 BT_DBG("%s", hdev->name);
3342
Linus Torvalds1da177e2005-04-16 15:20:36 -07003343 while ((skb = skb_dequeue(&hdev->rx_q))) {
Marcel Holtmanncd82e612012-02-20 20:34:38 +01003344 /* Send copy to monitor */
3345 hci_send_to_monitor(hdev, skb);
3346
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347 if (atomic_read(&hdev->promisc)) {
3348 /* Send copy to the sockets */
Marcel Holtmann470fe1b2012-02-20 14:50:30 +01003349 hci_send_to_sock(hdev, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003350 }
3351
3352 if (test_bit(HCI_RAW, &hdev->flags)) {
3353 kfree_skb(skb);
3354 continue;
3355 }
3356
3357 if (test_bit(HCI_INIT, &hdev->flags)) {
3358 /* Don't process data packets in this state. */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003359 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003360 case HCI_ACLDATA_PKT:
3361 case HCI_SCODATA_PKT:
3362 kfree_skb(skb);
3363 continue;
Stephen Hemminger3ff50b72007-04-20 17:09:22 -07003364 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003365 }
3366
3367 /* Process frame */
Marcel Holtmann0d48d932005-08-09 20:30:28 -07003368 switch (bt_cb(skb)->pkt_type) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003369 case HCI_EVENT_PKT:
Marcel Holtmannb78752c2010-08-08 23:06:53 -04003370 BT_DBG("%s Event packet", hdev->name);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003371 hci_event_packet(hdev, skb);
3372 break;
3373
3374 case HCI_ACLDATA_PKT:
3375 BT_DBG("%s ACL data packet", hdev->name);
3376 hci_acldata_packet(hdev, skb);
3377 break;
3378
3379 case HCI_SCODATA_PKT:
3380 BT_DBG("%s SCO data packet", hdev->name);
3381 hci_scodata_packet(hdev, skb);
3382 break;
3383
3384 default:
3385 kfree_skb(skb);
3386 break;
3387 }
3388 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003389}
3390
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003391static void hci_cmd_work(struct work_struct *work)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003392{
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003393 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003394 struct sk_buff *skb;
3395
Andrei Emeltchenko21047862012-07-10 15:27:47 +03003396 BT_DBG("%s cmd_cnt %d cmd queued %d", hdev->name,
3397 atomic_read(&hdev->cmd_cnt), skb_queue_len(&hdev->cmd_q));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003398
Linus Torvalds1da177e2005-04-16 15:20:36 -07003399 /* Send queued commands */
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02003400 if (atomic_read(&hdev->cmd_cnt)) {
3401 skb = skb_dequeue(&hdev->cmd_q);
3402 if (!skb)
3403 return;
3404
Wei Yongjun7585b972009-02-25 18:29:52 +08003405 kfree_skb(hdev->sent_cmd);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003406
Andrei Emeltchenko70f230202010-12-01 16:58:25 +02003407 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
3408 if (hdev->sent_cmd) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003409 atomic_dec(&hdev->cmd_cnt);
3410 hci_send_frame(skb);
Szymon Janc7bdb8a52011-07-26 22:46:54 +02003411 if (test_bit(HCI_RESET, &hdev->flags))
3412 del_timer(&hdev->cmd_timer);
3413 else
3414 mod_timer(&hdev->cmd_timer,
Andrei Emeltchenko5f246e82012-06-11 11:13:07 +03003415 jiffies + HCI_CMD_TIMEOUT);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003416 } else {
3417 skb_queue_head(&hdev->cmd_q, skb);
Gustavo F. Padovanc347b762011-12-14 23:53:47 -02003418 queue_work(hdev->workqueue, &hdev->cmd_work);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003419 }
3420 }
3421}
Andre Guedes2519a1f2011-11-07 11:45:24 -03003422
3423int hci_do_inquiry(struct hci_dev *hdev, u8 length)
3424{
3425 /* General inquiry access code (GIAC) */
3426 u8 lap[3] = { 0x33, 0x8b, 0x9e };
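	/* GIAC is 0x9e8b33; the LAP goes out least significant byte
	 * first, hence the byte order above.
	 */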
3427 struct hci_cp_inquiry cp;
3428
3429 BT_DBG("%s", hdev->name);
3430
3431 if (test_bit(HCI_INQUIRY, &hdev->flags))
3432 return -EINPROGRESS;
3433
Johan Hedberg46632622012-01-02 16:06:08 +02003434 inquiry_cache_flush(hdev);
3435
Andre Guedes2519a1f2011-11-07 11:45:24 -03003436 memset(&cp, 0, sizeof(cp));
3437 memcpy(&cp.lap, lap, sizeof(cp.lap));
3438 cp.length = length;
3439
3440 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
3441}
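/* A hypothetical caller sketch (names assumed, not from this file);
 * the length parameter is in units of 1.28 s, so 0x08 gives an
 * inquiry of roughly 10.24 s:
 *
 *	err = hci_do_inquiry(hdev, 0x08);
 *	if (err && err != -EINPROGRESS)
 *		BT_ERR("inquiry failed: %d", err);
 */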
Andre Guedes023d50492011-11-04 14:16:52 -03003442
3443int hci_cancel_inquiry(struct hci_dev *hdev)
3444{
3445 BT_DBG("%s", hdev->name);
3446
3447 if (!test_bit(HCI_INQUIRY, &hdev->flags))
Andre Guedes7537e5c2012-03-20 00:13:38 -03003448 return -EALREADY;
Andre Guedes023d50492011-11-04 14:16:52 -03003449
3450 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3451}
Andre Guedes31f79562012-04-24 21:02:53 -03003452
3453u8 bdaddr_to_le(u8 bdaddr_type)
3454{
3455 switch (bdaddr_type) {
3456 case BDADDR_LE_PUBLIC:
3457 return ADDR_LE_DEV_PUBLIC;
3458
3459 default:
3460 /* Fallback to LE Random address type */
3461 return ADDR_LE_DEV_RANDOM;
3462 }
3463}