/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"
#include "amp.h"

#define LE_FLOWCTL_MAX_CREDITS 65535

bool disable_ertm;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);

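/* Map an HCI connection's address type to the bdaddr type exposed to
 * L2CAP users: LE links report public/random, BR/EDR links always
 * report BDADDR_BREDR.
 */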
static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
{
	if (hcon->type == LE_LINK) {
		if (type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

/* ---- L2CAP channels ---- */

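/* The __l2cap_get_chan_by_* helpers walk conn->chan_l and expect the
 * caller to hold conn->chan_lock; the l2cap_get_chan_by_* wrappers
 * take that lock themselves and return the channel already locked.
 */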
static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}

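/* Bind @chan to a PSM on @src. If @psm is zero, pick the first free
 * dynamic PSM in the range 0x1001-0x10ff (the loop steps by two, so
 * only odd values are tried).
 */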
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

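/* Allocate the first unused dynamic source CID on this connection.
 * The dynamic range ends earlier on LE links than on BR/EDR links;
 * returns 0 if the whole range is already in use.
 */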
static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}

static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */

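/* Illustrative example (not from the spec): with mask = 0x3 and the
 * sequence numbers 1 and then 3 appended, the state becomes
 *
 *   head = 1, tail = 3, list[1] = 3, list[3] = L2CAP_SEQ_LIST_TAIL
 *
 * so l2cap_seq_list_pop() returns 1 and a membership check is a
 * single array read.
 */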
static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}

static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}

struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);

static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}

void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);

void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);

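/* Reset the LE credit-based flow control state of a channel: no SDU
 * reassembly in progress, no TX credits until the peer grants some,
 * RX credits and MPS taken from the file-level le_max_credits and
 * le_default_mps defaults (MPS capped by the channel's incoming MTU).
 */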
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}

void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}

void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}

void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);

static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}

void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);

static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}

/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}

static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}

static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}

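/* A channel counts as "moving" while an AMP channel move is in any
 * state other than stable or waiting to prepare; the send paths below
 * use this to hold off transmission during a move.
 */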
static bool __chan_is_moving(struct l2cap_chan *chan)
{
	return chan->move_state != L2CAP_MOVE_STABLE &&
	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}

static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}

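/* Helpers for converting between the wire-format ERTM control field
 * (16-bit enhanced or 32-bit extended, depending on FLAG_EXT_CTRL)
 * and the unpacked struct l2cap_ctrl representation.
 */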
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}

static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}

static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}

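/* Build an S-frame PDU carrying the given packed control field,
 * appending a CRC16 FCS when the channel is configured for it.
 */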
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}

static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}

static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}

static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return true;

	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}

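/* True only if high speed is enabled on the connection, the remote
 * advertises the A2MP fixed channel, a non-BR/EDR (AMP) controller is
 * powered up locally and the channel policy prefers AMP.
 */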
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}

static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}

void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}

static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}

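/* Prepare an ERTM channel for an AMP channel move: stop all timers,
 * reset the retry count of frames that have already been sent, and
 * park the TX/RX state machines in their XMIT/MOVE states until the
 * move completes.
 */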
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}

static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;

	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}

static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}

static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}

static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}

static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}

Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001286static void l2cap_request_info(struct l2cap_conn *conn)
1287{
1288 struct l2cap_info_req req;
1289
1290 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1291 return;
1292
1293 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1294
1295 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1296 conn->info_ident = l2cap_get_ident(conn);
1297
1298 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1299
1300 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1301 sizeof(req), &req);
1302}
1303
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001304static void l2cap_do_start(struct l2cap_chan *chan)
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001305{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03001306 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001307
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001308 if (conn->hcon->type == LE_LINK) {
Johan Hedberg96ac34f2013-05-13 11:15:07 +03001309 l2cap_le_start(chan);
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001310 return;
1311 }
1312
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001313 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1314 l2cap_request_info(conn);
1315 return;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001316 }
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001317
1318 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1319 return;
1320
1321 if (l2cap_chan_check_security(chan, true) &&
1322 __l2cap_no_conn_pending(chan))
1323 l2cap_start_connection(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001324}
1325
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001326static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1327{
1328 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03001329 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001330 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1331
1332 switch (mode) {
1333 case L2CAP_MODE_ERTM:
1334 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1335 case L2CAP_MODE_STREAMING:
1336 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1337 default:
1338 return 0x00;
1339 }
1340}
1341
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001342static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001343{
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001344 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001345 struct l2cap_disconn_req req;
1346
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001347 if (!conn)
1348 return;
1349
Andrei Emeltchenkoaad3d0e2012-09-06 15:05:42 +03001350 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001351 __clear_retrans_timer(chan);
1352 __clear_monitor_timer(chan);
1353 __clear_ack_timer(chan);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001354 }
1355
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001356 if (chan->scid == L2CAP_CID_A2MP) {
Gustavo Padovand1177732012-10-06 11:47:38 +01001357 l2cap_state_change(chan, BT_DISCONN);
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001358 return;
1359 }
1360
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001361 req.dcid = cpu_to_le16(chan->dcid);
1362 req.scid = cpu_to_le16(chan->scid);
Gustavo Padovan2d792812012-10-06 10:07:01 +01001363 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1364 sizeof(req), &req);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001365
Gustavo Padovanf8e73012013-10-15 19:24:46 -03001366 l2cap_state_change_and_error(chan, BT_DISCONN, err);
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001367}
1368
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001370static void l2cap_conn_start(struct l2cap_conn *conn)
1371{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001372 struct l2cap_chan *chan, *tmp;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001373
1374 BT_DBG("conn %p", conn);
1375
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001376 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001377
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001378 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001379 l2cap_chan_lock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001380
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03001381 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001382 l2cap_chan_ready(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001383 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001384 continue;
1385 }
1386
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03001387 if (chan->state == BT_CONNECT) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03001388 if (!l2cap_chan_check_security(chan, true) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001389 !__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001390 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001391 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02001392 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001393
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001394 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
Gustavo Padovan2d792812012-10-06 10:07:01 +01001395 && test_bit(CONF_STATE2_DEVICE,
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001396 &chan->conf_state)) {
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03001397 l2cap_chan_close(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001398 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001399 continue;
1400 }
1401
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001402 l2cap_start_connection(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001403
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03001404 } else if (chan->state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001405 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001406 char buf[128];
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001407 rsp.scid = cpu_to_le16(chan->dcid);
1408 rsp.dcid = cpu_to_le16(chan->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001409
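			/* Incoming connection still pending our response:
			 * reply with success, authorization pending (when
			 * setup is deferred) or authentication pending,
			 * depending on the security state.
			 */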
Johan Hedberge7cafc42014-07-17 15:35:38 +03001410 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07001411 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001412 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1413 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08001414 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001415
1416 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02001417 l2cap_state_change(chan, BT_CONFIG);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001418 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1419 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001420 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001421 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001422 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1423 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001424 }
1425
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001426 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001427 sizeof(rsp), &rsp);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001428
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001429 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001430 rsp.result != L2CAP_CR_SUCCESS) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001431 l2cap_chan_unlock(chan);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001432 continue;
1433 }
1434
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001435 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001436 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001437 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03001438 chan->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001439 }
1440
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001441 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001442 }
1443
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001444 mutex_unlock(&conn->chan_lock);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001445}
1446
Ville Tervob62f3282011-02-10 22:38:50 -03001447static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1448{
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001449 struct hci_conn *hcon = conn->hcon;
Johan Hedbergdcc36c12014-07-09 12:59:13 +03001450 struct hci_dev *hdev = hcon->hdev;
Ville Tervob62f3282011-02-10 22:38:50 -03001451
Johan Hedberge760ec12014-08-07 22:56:47 +03001452 BT_DBG("%s conn %p", hdev->name, conn);
Ville Tervob62f3282011-02-10 22:38:50 -03001453
Johan Hedberge760ec12014-08-07 22:56:47 +03001454	/* For outgoing pairing, which doesn't necessarily have an
 1455	 * associated socket (e.g. mgmt_pair_device), trigger security here.
1456 */
1457 if (hcon->out)
1458 smp_conn_security(hcon, hcon->pending_sec_level);
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001459
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001460 /* For LE slave connections, make sure the connection interval
 1461	 * is in the range of the minimum and maximum intervals that have
 1462	 * been configured for this connection. If not, then trigger
1463 * the connection update procedure.
1464 */
Johan Hedberg40bef302014-07-16 11:42:27 +03001465 if (hcon->role == HCI_ROLE_SLAVE &&
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001466 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1467 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1468 struct l2cap_conn_param_update_req req;
1469
1470 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1471 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1472 req.latency = cpu_to_le16(hcon->le_conn_latency);
1473 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1474
1475 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1476 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1477 }
Ville Tervob62f3282011-02-10 22:38:50 -03001478}
1479
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001480static void l2cap_conn_ready(struct l2cap_conn *conn)
1481{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001482 struct l2cap_chan *chan;
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001483 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001484
1485 BT_DBG("conn %p", conn);
1486
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001487 if (hcon->type == ACL_LINK)
1488 l2cap_request_info(conn);
1489
Johan Hedberge760ec12014-08-07 22:56:47 +03001490 mutex_lock(&conn->chan_lock);
1491
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001492 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03001493
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001494 l2cap_chan_lock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001495
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001496 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001497 l2cap_chan_unlock(chan);
1498 continue;
1499 }
1500
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001501 if (hcon->type == LE_LINK) {
Johan Hedbergf1496de2013-05-13 14:15:56 +03001502 l2cap_le_start(chan);
Vinicius Costa Gomes63128452011-06-17 22:46:26 -03001503 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001504 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1505 l2cap_chan_ready(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001506 } else if (chan->state == BT_CONNECT) {
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001507 l2cap_do_start(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001508 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001509
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001510 l2cap_chan_unlock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001511 }
1512
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001513 mutex_unlock(&conn->chan_lock);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001514
Johan Hedberg79a05722014-08-08 09:28:04 +03001515 if (hcon->type == LE_LINK)
1516 l2cap_le_conn_ready(conn);
1517
Johan Hedberg61a939c2014-01-17 20:45:11 +02001518 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001519}
1520
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001521/* Notify sockets that we cannot guarantee reliability anymore */
1522static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1523{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001524 struct l2cap_chan *chan;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001525
1526 BT_DBG("conn %p", conn);
1527
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001528 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001529
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001530 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenkoecf61bd2011-10-11 14:04:32 +03001531 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
Gustavo Padovan1d8b1fd2012-10-06 11:34:52 +01001532 l2cap_chan_set_err(chan, err);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001533 }
1534
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001535 mutex_unlock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001536}
1537
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001538static void l2cap_info_timeout(struct work_struct *work)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001539{
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001540 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001541 info_timer.work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001542
Marcel Holtmann984947d2009-02-06 23:35:19 +01001543 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001544 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001545
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001546 l2cap_conn_start(conn);
1547}
1548
David Herrmann2c8e1412013-04-06 20:28:45 +02001549/*
1550 * l2cap_user
1551 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1552 * callback is called during registration. The ->remove callback is called
1553 * during unregistration.
 1554 * An l2cap_user object is unregistered either explicitly or implicitly when the
 1555 * underlying l2cap_conn object is deleted. This guarantees that l2cap_conn->hcon,
 1556 * l2cap_conn->hchan, etc. are valid as long as the remove callback hasn't been called.
1557 * External modules must own a reference to the l2cap_conn object if they intend
1558 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1559 * any time if they don't.
1560 */
1561
1562int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1563{
1564 struct hci_dev *hdev = conn->hcon->hdev;
1565 int ret;
1566
1567 /* We need to check whether l2cap_conn is registered. If it is not, we
 1568 * must not register the l2cap_user. l2cap_conn_del() unregisters
1569 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1570 * relies on the parent hci_conn object to be locked. This itself relies
1571 * on the hci_dev object to be locked. So we must lock the hci device
1572 * here, too. */
1573
1574 hci_dev_lock(hdev);
1575
1576 if (user->list.next || user->list.prev) {
1577 ret = -EINVAL;
1578 goto out_unlock;
1579 }
1580
1581 /* conn->hchan is NULL after l2cap_conn_del() was called */
1582 if (!conn->hchan) {
1583 ret = -ENODEV;
1584 goto out_unlock;
1585 }
1586
1587 ret = user->probe(conn, user);
1588 if (ret)
1589 goto out_unlock;
1590
1591 list_add(&user->list, &conn->users);
1592 ret = 0;
1593
1594out_unlock:
1595 hci_dev_unlock(hdev);
1596 return ret;
1597}
1598EXPORT_SYMBOL(l2cap_register_user);
1599
1600void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1601{
1602 struct hci_dev *hdev = conn->hcon->hdev;
1603
1604 hci_dev_lock(hdev);
1605
1606 if (!user->list.next || !user->list.prev)
1607 goto out_unlock;
1608
1609 list_del(&user->list);
1610 user->list.next = NULL;
1611 user->list.prev = NULL;
1612 user->remove(conn, user);
1613
1614out_unlock:
1615 hci_dev_unlock(hdev);
1616}
1617EXPORT_SYMBOL(l2cap_unregister_user);
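
/* Usage sketch for the l2cap_user API above (hypothetical external module;
 * the my_* names are illustrative only and not part of this file):
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 * The module holds a reference on conn (l2cap_conn_get()) before calling
 * l2cap_register_user(conn, &my_user), and drops it after
 * l2cap_unregister_user(conn, &my_user).
 */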
1618
1619static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1620{
1621 struct l2cap_user *user;
1622
1623 while (!list_empty(&conn->users)) {
1624 user = list_first_entry(&conn->users, struct l2cap_user, list);
1625 list_del(&user->list);
1626 user->list.next = NULL;
1627 user->list.prev = NULL;
1628 user->remove(conn, user);
1629 }
1630}
1631
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001632static void l2cap_conn_del(struct hci_conn *hcon, int err)
1633{
1634 struct l2cap_conn *conn = hcon->l2cap_data;
1635 struct l2cap_chan *chan, *l;
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001636
1637 if (!conn)
1638 return;
1639
1640 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1641
1642 kfree_skb(conn->rx_skb);
1643
Johan Hedberg61a939c2014-01-17 20:45:11 +02001644 skb_queue_purge(&conn->pending_rx);
Jukka Taimisto7ab56c32014-06-12 10:15:13 +00001645
 1646	/* We cannot call flush_work(&conn->pending_rx_work) here since we
1647 * might block if we are running on a worker from the same workqueue
1648 * pending_rx_work is waiting on.
1649 */
1650 if (work_pending(&conn->pending_rx_work))
1651 cancel_work_sync(&conn->pending_rx_work);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001652
Johan Hedbergf3d82d02014-09-05 22:19:50 +03001653 if (work_pending(&conn->id_addr_update_work))
1654 cancel_work_sync(&conn->id_addr_update_work);
1655
David Herrmann2c8e1412013-04-06 20:28:45 +02001656 l2cap_unregister_all_users(conn);
1657
Johan Hedberge31fb862014-08-18 20:33:28 +03001658 /* Force the connection to be immediately dropped */
1659 hcon->disc_timeout = 0;
1660
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001661 mutex_lock(&conn->chan_lock);
1662
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001663 /* Kill channels */
1664 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
Mat Martineau61d6ef32012-04-27 16:50:50 -07001665 l2cap_chan_hold(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001666 l2cap_chan_lock(chan);
1667
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001668 l2cap_chan_del(chan, err);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001669
1670 l2cap_chan_unlock(chan);
1671
Gustavo Padovan80b98022012-05-27 22:27:51 -03001672 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07001673 l2cap_chan_put(chan);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001674 }
1675
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001676 mutex_unlock(&conn->chan_lock);
1677
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001678 hci_chan_del(conn->hchan);
1679
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001680 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
Ulisses Furquim127074b2012-01-30 18:26:29 -02001681 cancel_delayed_work_sync(&conn->info_timer);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001682
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001683 hcon->l2cap_data = NULL;
David Herrmann9c903e32013-04-06 20:28:44 +02001684 conn->hchan = NULL;
1685 l2cap_conn_put(conn);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001686}
1687
David Herrmann9c903e32013-04-06 20:28:44 +02001688static void l2cap_conn_free(struct kref *ref)
1689{
1690 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1691
1692 hci_conn_put(conn->hcon);
1693 kfree(conn);
1694}
1695
Johan Hedberg51bb84572014-08-15 21:06:57 +03001696struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
David Herrmann9c903e32013-04-06 20:28:44 +02001697{
1698 kref_get(&conn->ref);
Johan Hedberg51bb84572014-08-15 21:06:57 +03001699 return conn;
David Herrmann9c903e32013-04-06 20:28:44 +02001700}
1701EXPORT_SYMBOL(l2cap_conn_get);
1702
1703void l2cap_conn_put(struct l2cap_conn *conn)
1704{
1705 kref_put(&conn->ref, l2cap_conn_free);
1706}
1707EXPORT_SYMBOL(l2cap_conn_put);
1708
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
Ido Yarivc2287682012-04-20 15:46:07 -03001711/* Find channel with psm and source / destination bdaddr.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 * Returns closest match.
1713 */
Ido Yarivc2287682012-04-20 15:46:07 -03001714static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1715 bdaddr_t *src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001716 bdaddr_t *dst,
1717 u8 link_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001719 struct l2cap_chan *c, *c1 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001721 read_lock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001722
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001723 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03001724 if (state && c->state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 continue;
1726
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001727 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1728 continue;
1729
1730 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1731 continue;
1732
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001733 if (c->psm == psm) {
Ido Yarivc2287682012-04-20 15:46:07 -03001734 int src_match, dst_match;
1735 int src_any, dst_any;
1736
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 /* Exact match. */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001738 src_match = !bacmp(&c->src, src);
1739 dst_match = !bacmp(&c->dst, dst);
Ido Yarivc2287682012-04-20 15:46:07 -03001740 if (src_match && dst_match) {
Johan Hedberga24cce12014-08-07 22:56:42 +03001741 l2cap_chan_hold(c);
Johannes Berga7567b22011-06-01 08:29:54 +02001742 read_unlock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001743 return c;
1744 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
1746 /* Closest match */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001747 src_any = !bacmp(&c->src, BDADDR_ANY);
1748 dst_any = !bacmp(&c->dst, BDADDR_ANY);
Ido Yarivc2287682012-04-20 15:46:07 -03001749 if ((src_match && dst_any) || (src_any && dst_match) ||
1750 (src_any && dst_any))
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001751 c1 = c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 }
1753 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754
Johan Hedberga24cce12014-08-07 22:56:42 +03001755 if (c1)
1756 l2cap_chan_hold(c1);
1757
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001758 read_unlock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001759
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001760 return c1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761}
1762
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001763static void l2cap_monitor_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001764{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001765 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001766 monitor_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001767
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03001768 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001769
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001770 l2cap_chan_lock(chan);
1771
Mat Martineau80909e02012-05-17 20:53:50 -07001772 if (!chan->conn) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001773 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001774 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001775 return;
1776 }
1777
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001778 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001779
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001780 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001781 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001782}
1783
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001784static void l2cap_retrans_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001785{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001786 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001787 retrans_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001788
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03001789 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001790
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001791 l2cap_chan_lock(chan);
1792
Mat Martineau80909e02012-05-17 20:53:50 -07001793 if (!chan->conn) {
1794 l2cap_chan_unlock(chan);
1795 l2cap_chan_put(chan);
1796 return;
1797 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001798
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001799 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001800 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001801 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001802}
1803
Gustavo Padovand6603662012-05-21 13:58:22 -03001804static void l2cap_streaming_send(struct l2cap_chan *chan,
1805 struct sk_buff_head *skbs)
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001806{
Gustavo F. Padovanccbb84a2010-08-30 18:44:44 -03001807 struct sk_buff *skb;
Mat Martineau37339372012-05-17 20:53:33 -07001808 struct l2cap_ctrl *control;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001809
Mat Martineau37339372012-05-17 20:53:33 -07001810 BT_DBG("chan %p, skbs %p", chan, skbs);
1811
Mat Martineaub99e13a2012-10-23 15:24:19 -07001812 if (__chan_is_moving(chan))
1813 return;
1814
Mat Martineau37339372012-05-17 20:53:33 -07001815 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1816
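	/* Streaming mode has no acknowledgements or retransmissions:
	 * every queued frame is simply stamped with the next tx_seq
	 * and sent immediately.
	 */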
1817 while (!skb_queue_empty(&chan->tx_q)) {
1818
1819 skb = skb_dequeue(&chan->tx_q);
1820
1821 bt_cb(skb)->control.retries = 1;
1822 control = &bt_cb(skb)->control;
1823
1824 control->reqseq = 0;
1825 control->txseq = chan->next_tx_seq;
1826
1827 __pack_control(chan, control, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001828
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001829 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau37339372012-05-17 20:53:33 -07001830 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1831 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001832 }
1833
Gustavo F. Padovan43434782011-04-12 18:31:57 -03001834 l2cap_do_send(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001835
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001836 BT_DBG("Sent txseq %u", control->txseq);
Mat Martineau37339372012-05-17 20:53:33 -07001837
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001838 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau37339372012-05-17 20:53:33 -07001839 chan->frames_sent++;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001840 }
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001841}
1842
Szymon Janc67c9e842011-07-28 16:24:33 +02001843static int l2cap_ertm_send(struct l2cap_chan *chan)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001844{
1845 struct sk_buff *skb, *tx_skb;
Mat Martineau18a48e72012-05-17 20:53:34 -07001846 struct l2cap_ctrl *control;
1847 int sent = 0;
1848
1849 BT_DBG("chan %p", chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001850
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03001851 if (chan->state != BT_CONNECTED)
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001852 return -ENOTCONN;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001853
Mat Martineau94122bb2012-05-02 09:42:02 -07001854 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1855 return 0;
1856
Mat Martineaub99e13a2012-10-23 15:24:19 -07001857 if (__chan_is_moving(chan))
1858 return 0;
1859
Mat Martineau18a48e72012-05-17 20:53:34 -07001860 while (chan->tx_send_head &&
1861 chan->unacked_frames < chan->remote_tx_win &&
1862 chan->tx_state == L2CAP_TX_STATE_XMIT) {
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001863
Mat Martineau18a48e72012-05-17 20:53:34 -07001864 skb = chan->tx_send_head;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001865
Mat Martineau18a48e72012-05-17 20:53:34 -07001866 bt_cb(skb)->control.retries = 1;
1867 control = &bt_cb(skb)->control;
Gustavo F. Padovan95ffa972010-06-18 20:37:33 -03001868
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001869 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
Mat Martineau18a48e72012-05-17 20:53:34 -07001870 control->final = 1;
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001871
Mat Martineau18a48e72012-05-17 20:53:34 -07001872 control->reqseq = chan->buffer_seq;
1873 chan->last_acked_seq = chan->buffer_seq;
1874 control->txseq = chan->next_tx_seq;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001875
Mat Martineau18a48e72012-05-17 20:53:34 -07001876 __pack_control(chan, control, skb);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001877
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001878 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau18a48e72012-05-17 20:53:34 -07001879 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1880 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001881 }
1882
Mat Martineau18a48e72012-05-17 20:53:34 -07001883 /* Clone after data has been modified. Data is assumed to be
 1884	 * read-only (for locking purposes) on cloned sk_buffs.
1885 */
1886 tx_skb = skb_clone(skb, GFP_KERNEL);
1887
1888 if (!tx_skb)
1889 break;
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -03001890
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001891 __set_retrans_timer(chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001892
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001893 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau18a48e72012-05-17 20:53:34 -07001894 chan->unacked_frames++;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03001895 chan->frames_sent++;
Mat Martineau18a48e72012-05-17 20:53:34 -07001896 sent++;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001897
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001898 if (skb_queue_is_last(&chan->tx_q, skb))
1899 chan->tx_send_head = NULL;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001900 else
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001901 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
Mat Martineau18a48e72012-05-17 20:53:34 -07001902
1903 l2cap_do_send(chan, tx_skb);
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001904 BT_DBG("Sent txseq %u", control->txseq);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001905 }
1906
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001907 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1908 chan->unacked_frames, skb_queue_len(&chan->tx_q));
Mat Martineau18a48e72012-05-17 20:53:34 -07001909
1910 return sent;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001911}
1912
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001913static void l2cap_ertm_resend(struct l2cap_chan *chan)
1914{
1915 struct l2cap_ctrl control;
1916 struct sk_buff *skb;
1917 struct sk_buff *tx_skb;
1918 u16 seq;
1919
1920 BT_DBG("chan %p", chan);
1921
1922 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1923 return;
1924
Mat Martineaub99e13a2012-10-23 15:24:19 -07001925 if (__chan_is_moving(chan))
1926 return;
1927
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001928 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1929 seq = l2cap_seq_list_pop(&chan->retrans_list);
1930
1931 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1932 if (!skb) {
1933 BT_DBG("Error: Can't retransmit seq %d, frame missing",
Gustavo Padovan2d792812012-10-06 10:07:01 +01001934 seq);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001935 continue;
1936 }
1937
1938 bt_cb(skb)->control.retries++;
1939 control = bt_cb(skb)->control;
1940
1941 if (chan->max_tx != 0 &&
1942 bt_cb(skb)->control.retries > chan->max_tx) {
1943 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001944 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001945 l2cap_seq_list_clear(&chan->retrans_list);
1946 break;
1947 }
1948
1949 control.reqseq = chan->buffer_seq;
1950 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1951 control.final = 1;
1952 else
1953 control.final = 0;
1954
1955 if (skb_cloned(skb)) {
1956 /* Cloned sk_buffs are read-only, so we need a
1957 * writeable copy
1958 */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03001959 tx_skb = skb_copy(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001960 } else {
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03001961 tx_skb = skb_clone(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001962 }
1963
1964 if (!tx_skb) {
1965 l2cap_seq_list_clear(&chan->retrans_list);
1966 break;
1967 }
1968
1969 /* Update skb contents */
1970 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1971 put_unaligned_le32(__pack_extended_control(&control),
1972 tx_skb->data + L2CAP_HDR_SIZE);
1973 } else {
1974 put_unaligned_le16(__pack_enhanced_control(&control),
1975 tx_skb->data + L2CAP_HDR_SIZE);
1976 }
1977
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02001978 /* Update FCS */
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001979 if (chan->fcs == L2CAP_FCS_CRC16) {
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02001980 u16 fcs = crc16(0, (u8 *) tx_skb->data,
1981 tx_skb->len - L2CAP_FCS_SIZE);
1982 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
1983 L2CAP_FCS_SIZE);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001984 }
1985
1986 l2cap_do_send(chan, tx_skb);
1987
1988 BT_DBG("Resent txseq %d", control.txseq);
1989
1990 chan->last_acked_seq = chan->buffer_seq;
1991 }
1992}
1993
Mat Martineauf80842a2012-05-17 20:53:46 -07001994static void l2cap_retransmit(struct l2cap_chan *chan,
1995 struct l2cap_ctrl *control)
1996{
1997 BT_DBG("chan %p, control %p", chan, control);
1998
1999 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2000 l2cap_ertm_resend(chan);
2001}
2002
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002003static void l2cap_retransmit_all(struct l2cap_chan *chan,
2004 struct l2cap_ctrl *control)
2005{
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002006 struct sk_buff *skb;
2007
2008 BT_DBG("chan %p, control %p", chan, control);
2009
2010 if (control->poll)
2011 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2012
2013 l2cap_seq_list_clear(&chan->retrans_list);
2014
2015 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2016 return;
2017
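	/* Queue every unacked frame for retransmission, starting at the
	 * peer's reqseq and stopping at tx_send_head (the first frame
	 * that has not been sent yet).
	 */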
2018 if (chan->unacked_frames) {
2019 skb_queue_walk(&chan->tx_q, skb) {
2020 if (bt_cb(skb)->control.txseq == control->reqseq ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01002021 skb == chan->tx_send_head)
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002022 break;
2023 }
2024
2025 skb_queue_walk_from(&chan->tx_q, skb) {
2026 if (skb == chan->tx_send_head)
2027 break;
2028
2029 l2cap_seq_list_append(&chan->retrans_list,
2030 bt_cb(skb)->control.txseq);
2031 }
2032
2033 l2cap_ertm_resend(chan);
2034 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002035}
2036
Szymon Jancb17e73b2012-01-11 10:59:47 +01002037static void l2cap_send_ack(struct l2cap_chan *chan)
2038{
Mat Martineau0a0aba42012-05-17 20:53:39 -07002039 struct l2cap_ctrl control;
2040 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2041 chan->last_acked_seq);
2042 int threshold;
2043
2044 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2045 chan, chan->last_acked_seq, chan->buffer_seq);
2046
2047 memset(&control, 0, sizeof(control));
2048 control.sframe = 1;
2049
2050 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2051 chan->rx_state == L2CAP_RX_STATE_RECV) {
2052 __clear_ack_timer(chan);
2053 control.super = L2CAP_SUPER_RNR;
2054 control.reqseq = chan->buffer_seq;
2055 l2cap_send_sframe(chan, &control);
2056 } else {
2057 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2058 l2cap_ertm_send(chan);
2059 /* If any i-frames were sent, they included an ack */
2060 if (chan->buffer_seq == chan->last_acked_seq)
2061 frames_to_ack = 0;
2062 }
2063
Mat Martineauc20f8e32012-07-10 05:47:07 -07002064 /* Ack now if the window is 3/4ths full.
Mat Martineau0a0aba42012-05-17 20:53:39 -07002065 * Calculate without mul or div
2066 */
Mat Martineauc20f8e32012-07-10 05:47:07 -07002067 threshold = chan->ack_win;
Mat Martineau0a0aba42012-05-17 20:53:39 -07002068 threshold += threshold << 1;
2069 threshold >>= 2;
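		/* threshold is now (3 * ack_win) / 4; e.g. an ack_win of
		 * 63 gives (63 + 126) >> 2 = 47.
		 */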
2070
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002071 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
Mat Martineau0a0aba42012-05-17 20:53:39 -07002072 threshold);
2073
2074 if (frames_to_ack >= threshold) {
2075 __clear_ack_timer(chan);
2076 control.super = L2CAP_SUPER_RR;
2077 control.reqseq = chan->buffer_seq;
2078 l2cap_send_sframe(chan, &control);
2079 frames_to_ack = 0;
2080 }
2081
2082 if (frames_to_ack)
2083 __set_ack_timer(chan);
2084 }
Szymon Jancb17e73b2012-01-11 10:59:47 +01002085}
2086
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002087static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2088 struct msghdr *msg, int len,
2089 int count, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090{
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002091 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002092 struct sk_buff **frag;
Gustavo Padovan90338942012-04-06 20:15:47 -03002093 int sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094
Jukka Rissanen04988782014-06-18 16:37:07 +03002095 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2096 msg->msg_iov, count))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002097 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
2099 sent += count;
2100 len -= count;
2101
2102 /* Continuation fragments (no L2CAP header) */
2103 frag = &skb_shinfo(skb)->frag_list;
2104 while (len) {
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002105 struct sk_buff *tmp;
2106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 count = min_t(unsigned int, conn->mtu, len);
2108
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002109 tmp = chan->ops->alloc_skb(chan, 0, count,
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002110 msg->msg_flags & MSG_DONTWAIT);
2111 if (IS_ERR(tmp))
2112 return PTR_ERR(tmp);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002113
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002114 *frag = tmp;
2115
Jukka Rissanen04988782014-06-18 16:37:07 +03002116 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2117 msg->msg_iov, count))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002118 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119
2120 sent += count;
2121 len -= count;
2122
Gustavo Padovan2d0ed3d2012-05-11 13:16:12 -03002123 skb->len += (*frag)->len;
2124 skb->data_len += (*frag)->len;
2125
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 frag = &(*frag)->next;
2127 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128
2129 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002130}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002132static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002133 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002134{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002135 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002136 struct sk_buff *skb;
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002137 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002138 struct l2cap_hdr *lh;
2139
Marcel Holtmann8d463212014-06-05 15:22:51 +02002140 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2141 __le16_to_cpu(chan->psm), len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002142
2143 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002144
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002145 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002146 msg->msg_flags & MSG_DONTWAIT);
2147 if (IS_ERR(skb))
2148 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002149
2150 /* Create L2CAP header */
2151 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002152 lh->cid = cpu_to_le16(chan->dcid);
Andrei Emeltchenkodaf6a78c2012-05-03 10:55:52 +03002153 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
Marcel Holtmann43b1b8d2013-10-12 06:01:26 -07002154 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002155
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002156 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002157 if (unlikely(err < 0)) {
2158 kfree_skb(skb);
2159 return ERR_PTR(err);
2160 }
2161 return skb;
2162}
2163
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002164static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002165 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002166{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002167 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002168 struct sk_buff *skb;
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002169 int err, count;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002170 struct l2cap_hdr *lh;
2171
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002172 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002173
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002174 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002175
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002176 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002177 msg->msg_flags & MSG_DONTWAIT);
2178 if (IS_ERR(skb))
2179 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002180
2181 /* Create L2CAP header */
2182 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002183 lh->cid = cpu_to_le16(chan->dcid);
Gustavo Padovan6ff9b5e2012-05-02 11:56:17 -03002184 lh->len = cpu_to_le16(len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002185
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002186 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002187 if (unlikely(err < 0)) {
2188 kfree_skb(skb);
2189 return ERR_PTR(err);
2190 }
2191 return skb;
2192}
2193
Luiz Augusto von Dentzab0ff762011-09-12 20:00:50 +03002194static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002195 struct msghdr *msg, size_t len,
2196 u16 sdulen)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002197{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002198 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002199 struct sk_buff *skb;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002200 int err, count, hlen;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002201 struct l2cap_hdr *lh;
2202
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002203 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002204
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03002205 if (!conn)
2206 return ERR_PTR(-ENOTCONN);
2207
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002208 hlen = __ertm_hdr_size(chan);
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002209
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002210 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002211 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002212
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03002213 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002214 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03002215
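	/* Rough PDU layout built here: basic L2CAP header, then a 2 or
	 * 4 byte control field, an optional SDU length (SAR start
	 * frames only), the payload, and room for an optional FCS that
	 * is filled in at transmit time.
	 */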
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002216 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002217
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002218 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002219 msg->msg_flags & MSG_DONTWAIT);
2220 if (IS_ERR(skb))
2221 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002222
2223 /* Create L2CAP header */
2224 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002225 lh->cid = cpu_to_le16(chan->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002226 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002227
Mat Martineau18a48e72012-05-17 20:53:34 -07002228 /* Control header is populated later */
2229 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2230 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2231 else
2232 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002233
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002234 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002235 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002236
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002237 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002238 if (unlikely(err < 0)) {
2239 kfree_skb(skb);
2240 return ERR_PTR(err);
2241 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03002242
Mat Martineau18a48e72012-05-17 20:53:34 -07002243 bt_cb(skb)->control.fcs = chan->fcs;
Mat Martineau3ce35142012-04-25 16:36:14 -07002244 bt_cb(skb)->control.retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002245 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246}
2247
Mat Martineau94122bb2012-05-02 09:42:02 -07002248static int l2cap_segment_sdu(struct l2cap_chan *chan,
2249 struct sk_buff_head *seg_queue,
2250 struct msghdr *msg, size_t len)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002251{
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002252 struct sk_buff *skb;
Mat Martineau94122bb2012-05-02 09:42:02 -07002253 u16 sdu_len;
2254 size_t pdu_len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002255 u8 sar;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002256
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002257 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002258
Mat Martineau94122bb2012-05-02 09:42:02 -07002259 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2260 * so fragmented skbs are not used. The HCI layer's handling
2261 * of fragmented skbs is not compatible with ERTM's queueing.
2262 */
2263
2264 /* PDU size is derived from the HCI MTU */
2265 pdu_len = chan->conn->mtu;
2266
Mat Martineaua5495742012-10-23 15:24:21 -07002267 /* Constrain PDU size for BR/EDR connections */
2268 if (!chan->hs_hcon)
2269 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
Mat Martineau94122bb2012-05-02 09:42:02 -07002270
2271 /* Adjust for largest possible L2CAP overhead. */
Gustavo Padovan35d401d2012-05-25 18:57:05 -03002272 if (chan->fcs)
2273 pdu_len -= L2CAP_FCS_SIZE;
2274
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002275 pdu_len -= __ertm_hdr_size(chan);
Mat Martineau94122bb2012-05-02 09:42:02 -07002276
2277 /* Remote device may have requested smaller PDUs */
2278 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2279
2280 if (len <= pdu_len) {
2281 sar = L2CAP_SAR_UNSEGMENTED;
2282 sdu_len = 0;
2283 pdu_len = len;
2284 } else {
2285 sar = L2CAP_SAR_START;
2286 sdu_len = len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002287 }
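	/* For example, under these rules a 1000 byte SDU with a 672 byte
	 * PDU limit is emitted as a START frame carrying SDULEN plus the
	 * first 672 bytes, followed by an END frame with the remaining
	 * 328 bytes (larger SDUs would add CONTINUE frames in between).
	 */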
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002288
2289 while (len > 0) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002290 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002291
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002292 if (IS_ERR(skb)) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002293 __skb_queue_purge(seg_queue);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002294 return PTR_ERR(skb);
2295 }
2296
Mat Martineau94122bb2012-05-02 09:42:02 -07002297 bt_cb(skb)->control.sar = sar;
2298 __skb_queue_tail(seg_queue, skb);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002299
Mat Martineau94122bb2012-05-02 09:42:02 -07002300 len -= pdu_len;
Lukasz Rymanowski069cb272014-08-13 16:01:41 +02002301 if (sdu_len)
Mat Martineau94122bb2012-05-02 09:42:02 -07002302 sdu_len = 0;
Mat Martineau94122bb2012-05-02 09:42:02 -07002303
2304 if (len <= pdu_len) {
2305 sar = L2CAP_SAR_END;
2306 pdu_len = len;
2307 } else {
2308 sar = L2CAP_SAR_CONTINUE;
2309 }
2310 }
2311
Gustavo Padovanf0f62792012-05-29 13:29:17 -03002312 return 0;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002313}
2314
Johan Hedberg177f8f22013-05-31 17:54:51 +03002315static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2316 struct msghdr *msg,
2317 size_t len, u16 sdulen)
2318{
2319 struct l2cap_conn *conn = chan->conn;
2320 struct sk_buff *skb;
2321 int err, count, hlen;
2322 struct l2cap_hdr *lh;
2323
2324 BT_DBG("chan %p len %zu", chan, len);
2325
2326 if (!conn)
2327 return ERR_PTR(-ENOTCONN);
2328
2329 hlen = L2CAP_HDR_SIZE;
2330
2331 if (sdulen)
2332 hlen += L2CAP_SDULEN_SIZE;
2333
2334 count = min_t(unsigned int, (conn->mtu - hlen), len);
2335
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002336 skb = chan->ops->alloc_skb(chan, hlen, count,
Johan Hedberg177f8f22013-05-31 17:54:51 +03002337 msg->msg_flags & MSG_DONTWAIT);
2338 if (IS_ERR(skb))
2339 return skb;
2340
2341 /* Create L2CAP header */
2342 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2343 lh->cid = cpu_to_le16(chan->dcid);
2344 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2345
2346 if (sdulen)
2347 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2348
2349 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2350 if (unlikely(err < 0)) {
2351 kfree_skb(skb);
2352 return ERR_PTR(err);
2353 }
2354
2355 return skb;
2356}
2357
2358static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2359 struct sk_buff_head *seg_queue,
2360 struct msghdr *msg, size_t len)
2361{
2362 struct sk_buff *skb;
2363 size_t pdu_len;
2364 u16 sdu_len;
2365
2366 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2367
Johan Hedberg177f8f22013-05-31 17:54:51 +03002368 sdu_len = len;
Johan Hedberg72c6fb92014-08-15 21:06:51 +03002369 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
Johan Hedberg177f8f22013-05-31 17:54:51 +03002370
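	/* Only the first PDU of an LE SDU carries the 2 byte SDU length
	 * field; once sdu_len is cleared below, each following PDU gets
	 * that space back for payload (pdu_len += L2CAP_SDULEN_SIZE).
	 */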
2371 while (len > 0) {
2372 if (len <= pdu_len)
2373 pdu_len = len;
2374
2375 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2376 if (IS_ERR(skb)) {
2377 __skb_queue_purge(seg_queue);
2378 return PTR_ERR(skb);
2379 }
2380
2381 __skb_queue_tail(seg_queue, skb);
2382
2383 len -= pdu_len;
2384
2385 if (sdu_len) {
2386 sdu_len = 0;
2387 pdu_len += L2CAP_SDULEN_SIZE;
2388 }
2389 }
2390
2391 return 0;
2392}
2393
Marcel Holtmann8d463212014-06-05 15:22:51 +02002394int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002395{
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002396 struct sk_buff *skb;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002397 int err;
Mat Martineau94122bb2012-05-02 09:42:02 -07002398 struct sk_buff_head seg_queue;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002399
Seung-Woo Kim31e8ce82013-11-05 18:46:33 +09002400 if (!chan->conn)
2401 return -ENOTCONN;
2402
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002403 /* Connectionless channel */
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002404 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
Marcel Holtmann8d463212014-06-05 15:22:51 +02002405 skb = l2cap_create_connless_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002406 if (IS_ERR(skb))
2407 return PTR_ERR(skb);
2408
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002409 /* Channel lock is released before requesting new skb and then
 2410 * reacquired, so we need to recheck the channel state.
2411 */
2412 if (chan->state != BT_CONNECTED) {
2413 kfree_skb(skb);
2414 return -ENOTCONN;
2415 }
2416
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002417 l2cap_do_send(chan, skb);
2418 return len;
2419 }
2420
2421 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03002422 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedberg177f8f22013-05-31 17:54:51 +03002423 /* Check outgoing MTU */
2424 if (len > chan->omtu)
2425 return -EMSGSIZE;
2426
Johan Hedbergfad5fc892013-12-05 09:45:01 +02002427 if (!chan->tx_credits)
2428 return -EAGAIN;
2429
Johan Hedberg177f8f22013-05-31 17:54:51 +03002430 __skb_queue_head_init(&seg_queue);
2431
2432 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2433
2434 if (chan->state != BT_CONNECTED) {
2435 __skb_queue_purge(&seg_queue);
2436 err = -ENOTCONN;
2437 }
2438
2439 if (err)
2440 return err;
2441
2442 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2443
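		/* Each PDU sent consumes one LE flow control credit; stop
		 * when the credits run out and suspend the channel until
		 * the peer grants more.
		 */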
2444 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2445 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2446 chan->tx_credits--;
2447 }
2448
2449 if (!chan->tx_credits)
2450 chan->ops->suspend(chan);
2451
2452 err = len;
2453
2454 break;
2455
Johan Hedbergfad5fc892013-12-05 09:45:01 +02002456 case L2CAP_MODE_BASIC:
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002457 /* Check outgoing MTU */
2458 if (len > chan->omtu)
2459 return -EMSGSIZE;
2460
2461 /* Create a basic PDU */
Marcel Holtmann8d463212014-06-05 15:22:51 +02002462 skb = l2cap_create_basic_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002463 if (IS_ERR(skb))
2464 return PTR_ERR(skb);
2465
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002466 /* Channel lock is released before requesting new skb and then
 2467 * reacquired, so we need to recheck the channel state.
2468 */
2469 if (chan->state != BT_CONNECTED) {
2470 kfree_skb(skb);
2471 return -ENOTCONN;
2472 }
2473
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002474 l2cap_do_send(chan, skb);
2475 err = len;
2476 break;
2477
2478 case L2CAP_MODE_ERTM:
2479 case L2CAP_MODE_STREAMING:
Mat Martineau94122bb2012-05-02 09:42:02 -07002480 /* Check outgoing MTU */
2481 if (len > chan->omtu) {
2482 err = -EMSGSIZE;
2483 break;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002484 }
2485
Mat Martineau94122bb2012-05-02 09:42:02 -07002486 __skb_queue_head_init(&seg_queue);
2487
2488 /* Do segmentation before calling in to the state machine,
2489 * since it's possible to block while waiting for memory
2490 * allocation.
2491 */
2492 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2493
2494 /* The channel could have been closed while segmenting,
2495 * check that it is still connected.
2496 */
2497 if (chan->state != BT_CONNECTED) {
2498 __skb_queue_purge(&seg_queue);
2499 err = -ENOTCONN;
2500 }
2501
2502 if (err)
2503 break;
2504
Mat Martineau37339372012-05-17 20:53:33 -07002505 if (chan->mode == L2CAP_MODE_ERTM)
Gustavo Padovand6603662012-05-21 13:58:22 -03002506 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
Mat Martineau37339372012-05-17 20:53:33 -07002507 else
Gustavo Padovand6603662012-05-21 13:58:22 -03002508 l2cap_streaming_send(chan, &seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002509
Gustavo Padovand6603662012-05-21 13:58:22 -03002510 err = len;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002511
Mat Martineau94122bb2012-05-02 09:42:02 -07002512 /* If the skbs were not queued for sending, they'll still be in
2513 * seg_queue and need to be purged.
2514 */
2515 __skb_queue_purge(&seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002516 break;
2517
2518 default:
2519 BT_DBG("bad state %1.1x", chan->mode);
2520 err = -EBADFD;
2521 }
2522
2523 return err;
2524}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03002525EXPORT_SYMBOL_GPL(l2cap_chan_send);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002526
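/* Request retransmission of missing I-frames: send an SREJ S-frame for
 * every sequence number from expected_tx_seq up to (but not including)
 * txseq that has not already been buffered in srej_q, remembering each
 * request in srej_list, then advance expected_tx_seq past txseq.
 */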
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002527static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2528{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002529 struct l2cap_ctrl control;
2530 u16 seq;
2531
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002532 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002533
2534 memset(&control, 0, sizeof(control));
2535 control.sframe = 1;
2536 control.super = L2CAP_SUPER_SREJ;
2537
2538 for (seq = chan->expected_tx_seq; seq != txseq;
2539 seq = __next_seq(chan, seq)) {
2540 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2541 control.reqseq = seq;
2542 l2cap_send_sframe(chan, &control);
2543 l2cap_seq_list_append(&chan->srej_list, seq);
2544 }
2545 }
2546
2547 chan->expected_tx_seq = __next_seq(chan, txseq);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002548}
2549
2550static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2551{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002552 struct l2cap_ctrl control;
2553
2554 BT_DBG("chan %p", chan);
2555
2556 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2557 return;
2558
2559 memset(&control, 0, sizeof(control));
2560 control.sframe = 1;
2561 control.super = L2CAP_SUPER_SREJ;
2562 control.reqseq = chan->srej_list.tail;
2563 l2cap_send_sframe(chan, &control);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002564}
2565
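/* Re-issue SREJ frames for the sequence numbers still outstanding in
 * srej_list, stopping when txseq (which has just arrived) or the end of
 * the list is reached.  Each re-sent entry is appended again so it stays
 * outstanding; initial_head limits the walk to a single pass.
 */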
2566static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2567{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002568 struct l2cap_ctrl control;
2569 u16 initial_head;
2570 u16 seq;
2571
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002572 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002573
2574 memset(&control, 0, sizeof(control));
2575 control.sframe = 1;
2576 control.super = L2CAP_SUPER_SREJ;
2577
2578 /* Capture initial list head to allow only one pass through the list. */
2579 initial_head = chan->srej_list.head;
2580
2581 do {
2582 seq = l2cap_seq_list_pop(&chan->srej_list);
2583 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2584 break;
2585
2586 control.reqseq = seq;
2587 l2cap_send_sframe(chan, &control);
2588 l2cap_seq_list_append(&chan->srej_list, seq);
2589 } while (chan->srej_list.head != initial_head);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002590}
2591
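/* Process an acknowledgement: free every transmitted frame up to (but not
 * including) reqseq from tx_q, update expected_ack_seq and unacked_frames,
 * and stop the retransmission timer once nothing is left unacknowledged.
 */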
Mat Martineau608bcc62012-05-17 20:53:32 -07002592static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2593{
2594 struct sk_buff *acked_skb;
2595 u16 ackseq;
2596
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002597 BT_DBG("chan %p, reqseq %u", chan, reqseq);
Mat Martineau608bcc62012-05-17 20:53:32 -07002598
2599 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2600 return;
2601
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002602 BT_DBG("expected_ack_seq %u, unacked_frames %u",
Mat Martineau608bcc62012-05-17 20:53:32 -07002603 chan->expected_ack_seq, chan->unacked_frames);
2604
2605 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2606 ackseq = __next_seq(chan, ackseq)) {
2607
2608 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2609 if (acked_skb) {
2610 skb_unlink(acked_skb, &chan->tx_q);
2611 kfree_skb(acked_skb);
2612 chan->unacked_frames--;
2613 }
2614 }
2615
2616 chan->expected_ack_seq = reqseq;
2617
2618 if (chan->unacked_frames == 0)
2619 __clear_retrans_timer(chan);
2620
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002621 BT_DBG("unacked_frames %u", chan->unacked_frames);
Mat Martineau608bcc62012-05-17 20:53:32 -07002622}
2623
2624static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2625{
2626 BT_DBG("chan %p", chan);
2627
2628 chan->expected_tx_seq = chan->buffer_seq;
2629 l2cap_seq_list_clear(&chan->srej_list);
2630 skb_queue_purge(&chan->srej_q);
2631 chan->rx_state = L2CAP_RX_STATE_RECV;
2632}
2633
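/* ERTM transmitter state machine, XMIT state: new data is queued and sent
 * immediately, acknowledgements release unacked frames, local busy
 * conditions are entered and cleared, and an explicit poll or a
 * retransmission timeout sends an RR/RNR with the P bit set and moves the
 * channel to the WAIT_F state.
 */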
Gustavo Padovand6603662012-05-21 13:58:22 -03002634static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2635 struct l2cap_ctrl *control,
2636 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002637{
Mat Martineau608bcc62012-05-17 20:53:32 -07002638 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2639 event);
2640
2641 switch (event) {
2642 case L2CAP_EV_DATA_REQUEST:
2643 if (chan->tx_send_head == NULL)
2644 chan->tx_send_head = skb_peek(skbs);
2645
2646 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2647 l2cap_ertm_send(chan);
2648 break;
2649 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2650 BT_DBG("Enter LOCAL_BUSY");
2651 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2652
2653 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2654 /* The SREJ_SENT state must be aborted if we are to
2655 * enter the LOCAL_BUSY state.
2656 */
2657 l2cap_abort_rx_srej_sent(chan);
2658 }
2659
2660 l2cap_send_ack(chan);
2661
2662 break;
2663 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2664 BT_DBG("Exit LOCAL_BUSY");
2665 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2666
2667 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2668 struct l2cap_ctrl local_control;
2669
2670 memset(&local_control, 0, sizeof(local_control));
2671 local_control.sframe = 1;
2672 local_control.super = L2CAP_SUPER_RR;
2673 local_control.poll = 1;
2674 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002675 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002676
2677 chan->retry_count = 1;
2678 __set_monitor_timer(chan);
2679 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2680 }
2681 break;
2682 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2683 l2cap_process_reqseq(chan, control->reqseq);
2684 break;
2685 case L2CAP_EV_EXPLICIT_POLL:
2686 l2cap_send_rr_or_rnr(chan, 1);
2687 chan->retry_count = 1;
2688 __set_monitor_timer(chan);
2689 __clear_ack_timer(chan);
2690 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2691 break;
2692 case L2CAP_EV_RETRANS_TO:
2693 l2cap_send_rr_or_rnr(chan, 1);
2694 chan->retry_count = 1;
2695 __set_monitor_timer(chan);
2696 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2697 break;
2698 case L2CAP_EV_RECV_FBIT:
2699 /* Nothing to process */
2700 break;
2701 default:
2702 break;
2703 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002704}
2705
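/* ERTM transmitter state machine, WAIT_F state: a poll has been sent and
 * we are waiting for a frame with the F bit.  New data is only queued,
 * receipt of the F bit returns the channel to XMIT, and monitor timeouts
 * re-poll until retry_count reaches max_tx (unless max_tx is zero), after
 * which the channel is disconnected.
 */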
Gustavo Padovand6603662012-05-21 13:58:22 -03002706static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2707 struct l2cap_ctrl *control,
2708 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002709{
Mat Martineau608bcc62012-05-17 20:53:32 -07002710 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2711 event);
2712
2713 switch (event) {
2714 case L2CAP_EV_DATA_REQUEST:
2715 if (chan->tx_send_head == NULL)
2716 chan->tx_send_head = skb_peek(skbs);
2717 /* Queue data, but don't send. */
2718 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2719 break;
2720 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2721 BT_DBG("Enter LOCAL_BUSY");
2722 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2723
2724 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2725 /* The SREJ_SENT state must be aborted if we are to
2726 * enter the LOCAL_BUSY state.
2727 */
2728 l2cap_abort_rx_srej_sent(chan);
2729 }
2730
2731 l2cap_send_ack(chan);
2732
2733 break;
2734 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2735 BT_DBG("Exit LOCAL_BUSY");
2736 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2737
2738 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2739 struct l2cap_ctrl local_control;
2740 memset(&local_control, 0, sizeof(local_control));
2741 local_control.sframe = 1;
2742 local_control.super = L2CAP_SUPER_RR;
2743 local_control.poll = 1;
2744 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002745 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002746
2747 chan->retry_count = 1;
2748 __set_monitor_timer(chan);
2749 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2750 }
2751 break;
2752 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2753 l2cap_process_reqseq(chan, control->reqseq);
2754
2755 /* Fall through */
2756
2757 case L2CAP_EV_RECV_FBIT:
2758 if (control && control->final) {
2759 __clear_monitor_timer(chan);
2760 if (chan->unacked_frames > 0)
2761 __set_retrans_timer(chan);
2762 chan->retry_count = 0;
2763 chan->tx_state = L2CAP_TX_STATE_XMIT;
2764 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2765 }
2766 break;
2767 case L2CAP_EV_EXPLICIT_POLL:
2768 /* Ignore */
2769 break;
2770 case L2CAP_EV_MONITOR_TO:
2771 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2772 l2cap_send_rr_or_rnr(chan, 1);
2773 __set_monitor_timer(chan);
2774 chan->retry_count++;
2775 } else {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02002776 l2cap_send_disconn_req(chan, ECONNABORTED);
Mat Martineau608bcc62012-05-17 20:53:32 -07002777 }
2778 break;
2779 default:
2780 break;
2781 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002782}
2783
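/* Dispatch a transmitter state machine event to the handler for the
 * channel's current tx_state.
 */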
Gustavo Padovand6603662012-05-21 13:58:22 -03002784static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2785 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002786{
Mat Martineau608bcc62012-05-17 20:53:32 -07002787 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2788 chan, control, skbs, event, chan->tx_state);
2789
2790 switch (chan->tx_state) {
2791 case L2CAP_TX_STATE_XMIT:
Gustavo Padovand6603662012-05-21 13:58:22 -03002792 l2cap_tx_state_xmit(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002793 break;
2794 case L2CAP_TX_STATE_WAIT_F:
Gustavo Padovand6603662012-05-21 13:58:22 -03002795 l2cap_tx_state_wait_f(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002796 break;
2797 default:
2798 /* Ignore event */
2799 break;
2800 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002801}
2802
Mat Martineau4b51dae92012-05-17 20:53:37 -07002803static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2804 struct l2cap_ctrl *control)
2805{
2806 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002807 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
Mat Martineau4b51dae92012-05-17 20:53:37 -07002808}
2809
Mat Martineauf80842a2012-05-17 20:53:46 -07002810static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2811 struct l2cap_ctrl *control)
2812{
2813 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002814 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
Mat Martineauf80842a2012-05-17 20:53:46 -07002815}
2816
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817/* Copy frame to all raw sockets on that connection */
2818static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2819{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 struct sk_buff *nskb;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03002821 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822
2823 BT_DBG("conn %p", conn);
2824
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002825 mutex_lock(&conn->chan_lock);
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002826
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002827 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002828 if (chan->chan_type != L2CAP_CHAN_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 continue;
2830
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002831 /* Don't send frame to the channel it came from */
2832 if (bt_cb(skb)->chan == chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 continue;
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002834
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002835 nskb = skb_clone(skb, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03002836 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 continue;
Gustavo Padovan80b98022012-05-27 22:27:51 -03002838 if (chan->ops->recv(chan, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 kfree_skb(nskb);
2840 }
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002841
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002842 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843}
2844
2845/* ---- L2CAP signalling commands ---- */
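/* Build a signalling command PDU: L2CAP header (signalling CID, or the LE
 * signalling CID on LE links), command header and payload.  Payload that
 * does not fit within conn->mtu is carried in continuation fragments
 * chained to the first skb.
 */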
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002846static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2847 u8 ident, u16 dlen, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848{
2849 struct sk_buff *skb, **frag;
2850 struct l2cap_cmd_hdr *cmd;
2851 struct l2cap_hdr *lh;
2852 int len, count;
2853
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002854 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2855 conn, code, ident, dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856
Anderson Lizardo300b9622013-06-02 16:30:40 -04002857 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2858 return NULL;
2859
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2861 count = min_t(unsigned int, conn->mtu, len);
2862
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002863 skb = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 if (!skb)
2865 return NULL;
2866
2867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002868 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002869
2870 if (conn->hcon->type == LE_LINK)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002871 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002872 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002873 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874
2875 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2876 cmd->code = code;
2877 cmd->ident = ident;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002878 cmd->len = cpu_to_le16(dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879
2880 if (dlen) {
2881 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2882 memcpy(skb_put(skb, count), data, count);
2883 data += count;
2884 }
2885
2886 len -= skb->len;
2887
2888 /* Continuation fragments (no L2CAP header) */
2889 frag = &skb_shinfo(skb)->frag_list;
2890 while (len) {
2891 count = min_t(unsigned int, conn->mtu, len);
2892
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002893 *frag = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 if (!*frag)
2895 goto fail;
2896
2897 memcpy(skb_put(*frag, count), data, count);
2898
2899 len -= count;
2900 data += count;
2901
2902 frag = &(*frag)->next;
2903 }
2904
2905 return skb;
2906
2907fail:
2908 kfree_skb(skb);
2909 return NULL;
2910}
2911
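/* Read one configuration option from *ptr and advance the pointer.
 * 1-, 2- and 4-octet options are returned by value; other lengths are
 * returned as a pointer to the option data.  Returns the number of bytes
 * consumed.
 */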
Gustavo Padovan2d792812012-10-06 10:07:01 +01002912static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2913 unsigned long *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914{
2915 struct l2cap_conf_opt *opt = *ptr;
2916 int len;
2917
2918 len = L2CAP_CONF_OPT_SIZE + opt->len;
2919 *ptr += len;
2920
2921 *type = opt->type;
2922 *olen = opt->len;
2923
2924 switch (opt->len) {
2925 case 1:
2926 *val = *((u8 *) opt->val);
2927 break;
2928
2929 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002930 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 break;
2932
2933 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002934 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 break;
2936
2937 default:
2938 *val = (unsigned long) opt->val;
2939 break;
2940 }
2941
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002942 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943 return len;
2944}
2945
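/* Append one type/length/value configuration option at *ptr and advance
 * the pointer.  Lengths other than 1, 2 and 4 octets are copied from the
 * buffer that val points to.
 */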
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2947{
2948 struct l2cap_conf_opt *opt = *ptr;
2949
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002950 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951
2952 opt->type = type;
2953 opt->len = len;
2954
2955 switch (len) {
2956 case 1:
2957 *((u8 *) opt->val) = val;
2958 break;
2959
2960 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002961 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962 break;
2963
2964 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002965 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 break;
2967
2968 default:
2969 memcpy(opt->val, (void *) val, len);
2970 break;
2971 }
2972
2973 *ptr += L2CAP_CONF_OPT_SIZE + len;
2974}
2975
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002976static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2977{
2978 struct l2cap_conf_efs efs;
2979
Szymon Janc1ec918c2011-11-16 09:32:21 +01002980 switch (chan->mode) {
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002981 case L2CAP_MODE_ERTM:
2982 efs.id = chan->local_id;
2983 efs.stype = chan->local_stype;
2984 efs.msdu = cpu_to_le16(chan->local_msdu);
2985 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002986 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2987 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002988 break;
2989
2990 case L2CAP_MODE_STREAMING:
2991 efs.id = 1;
2992 efs.stype = L2CAP_SERV_BESTEFFORT;
2993 efs.msdu = cpu_to_le16(chan->local_msdu);
2994 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2995 efs.acc_lat = 0;
2996 efs.flush_to = 0;
2997 break;
2998
2999 default:
3000 return;
3001 }
3002
3003 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
Andrei Emeltchenko8936fa62012-10-08 11:14:41 +03003004 (unsigned long) &efs);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003005}
3006
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003007static void l2cap_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003008{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003009 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau03625202012-05-17 20:53:51 -07003010 ack_timer.work);
3011 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003012
Gustavo F. Padovan2fb9b3d2011-12-22 16:56:05 -02003013 BT_DBG("chan %p", chan);
3014
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003015 l2cap_chan_lock(chan);
3016
Mat Martineau03625202012-05-17 20:53:51 -07003017 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3018 chan->last_acked_seq);
3019
3020 if (frames_to_ack)
3021 l2cap_send_rr_or_rnr(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003022
3023 l2cap_chan_unlock(chan);
Szymon Janc09bfb2e2012-01-11 10:59:49 +01003024 l2cap_chan_put(chan);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003025}
3026
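/* Reset sequence counters, SDU reassembly and AMP move state for a newly
 * configured channel.  For ERTM mode this also sets the initial RX/TX
 * states, the retransmission, monitor and ack timers, and allocates the
 * SREJ and retransmission sequence lists sized by the local and remote
 * transmit windows.
 */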
Andrei Emeltchenko466f8002012-05-29 13:59:01 +03003027int l2cap_ertm_init(struct l2cap_chan *chan)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003028{
Mat Martineau3c588192012-04-11 10:48:42 -07003029 int err;
3030
Mat Martineau105bdf92012-04-27 16:50:48 -07003031 chan->next_tx_seq = 0;
3032 chan->expected_tx_seq = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003033 chan->expected_ack_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003034 chan->unacked_frames = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003035 chan->buffer_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003036 chan->frames_sent = 0;
Mat Martineau105bdf92012-04-27 16:50:48 -07003037 chan->last_acked_seq = 0;
3038 chan->sdu = NULL;
3039 chan->sdu_last_frag = NULL;
3040 chan->sdu_len = 0;
3041
Mat Martineaud34c34f2012-05-14 14:49:27 -07003042 skb_queue_head_init(&chan->tx_q);
3043
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003044 chan->local_amp_id = AMP_ID_BREDR;
3045 chan->move_id = AMP_ID_BREDR;
Mat Martineau08333282012-10-23 15:24:06 -07003046 chan->move_state = L2CAP_MOVE_STABLE;
3047 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3048
Mat Martineau105bdf92012-04-27 16:50:48 -07003049 if (chan->mode != L2CAP_MODE_ERTM)
3050 return 0;
3051
3052 chan->rx_state = L2CAP_RX_STATE_RECV;
3053 chan->tx_state = L2CAP_TX_STATE_XMIT;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003054
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003055 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3056 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3057 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003058
Gustavo F. Padovanf1c67752011-03-25 20:36:10 -03003059 skb_queue_head_init(&chan->srej_q);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03003060
Mat Martineau3c588192012-04-11 10:48:42 -07003061 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3062 if (err < 0)
3063 return err;
3064
Mat Martineau9dc9aff2012-05-17 16:20:14 -07003065 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3066 if (err < 0)
3067 l2cap_seq_list_free(&chan->srej_list);
3068
3069 return err;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003070}
3071
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003072static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3073{
3074 switch (mode) {
3075 case L2CAP_MODE_STREAMING:
3076 case L2CAP_MODE_ERTM:
3077 if (l2cap_mode_supported(mode, remote_feat_mask))
3078 return mode;
3079 /* fall through */
3080 default:
3081 return L2CAP_MODE_BASIC;
3082 }
3083}
3084
Marcel Holtmann848566b2013-10-01 22:59:22 -07003085static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003086{
Marcel Holtmann848566b2013-10-01 22:59:22 -07003087 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003088}
3089
Marcel Holtmann848566b2013-10-01 22:59:22 -07003090static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003091{
Marcel Holtmann848566b2013-10-01 22:59:22 -07003092 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003093}
3094
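/* Choose ERTM retransmission and monitor timeouts: derive them from the
 * AMP controller's best-effort flush timeout when the channel lives on an
 * AMP link, otherwise fall back to the default values.
 */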
Mat Martineau36c86c82012-10-23 15:24:20 -07003095static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3096 struct l2cap_conf_rfc *rfc)
3097{
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003098 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
Mat Martineau36c86c82012-10-23 15:24:20 -07003099 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3100
3101 		/* Class 1 devices must have ERTM timeouts
3102 * exceeding the Link Supervision Timeout. The
3103 * default Link Supervision Timeout for AMP
3104 * controllers is 10 seconds.
3105 *
3106 * Class 1 devices use 0xffffffff for their
3107 * best-effort flush timeout, so the clamping logic
3108 * will result in a timeout that meets the above
3109 * requirement. ERTM timeouts are 16-bit values, so
3110 * the maximum timeout is 65.535 seconds.
3111 */
3112
3113 /* Convert timeout to milliseconds and round */
3114 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3115
3116 /* This is the recommended formula for class 2 devices
3117 * that start ERTM timers when packets are sent to the
3118 * controller.
3119 */
3120 ertm_to = 3 * ertm_to + 500;
3121
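		/* Example: the class 1 flush timeout of 0xffffffff rounds up
		 * to 4294968 ms, and 3 * 4294968 + 500 is far larger than
		 * 0xffff, so such links end up clamped to the maximum
		 * 65535 ms (65.535 s) below.
		 */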
3122 if (ertm_to > 0xffff)
3123 ertm_to = 0xffff;
3124
3125 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3126 rfc->monitor_timeout = rfc->retrans_timeout;
3127 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003128 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3129 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
Mat Martineau36c86c82012-10-23 15:24:20 -07003130 }
3131}
3132
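/* Pick the transmit window: if the requested window exceeds the standard
 * maximum and the connection supports the extended window size option,
 * enable the extended control field; otherwise clamp tx_win to the
 * default.  The ack window starts out equal to the transmit window.
 */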
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003133static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3134{
3135 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
Marcel Holtmann848566b2013-10-01 22:59:22 -07003136 __l2cap_ews_supported(chan->conn)) {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003137 /* use extended control field */
3138 set_bit(FLAG_EXT_CTRL, &chan->flags);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003139 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3140 } else {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003141 chan->tx_win = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003142 L2CAP_DEFAULT_TX_WINDOW);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003143 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3144 }
Mat Martineauc20f8e32012-07-10 05:47:07 -07003145 chan->ack_win = chan->tx_win;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003146}
3147
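/* Build our configuration request for chan in data.  On the first request
 * the channel mode may still be (re)selected from the remote feature mask;
 * the request then carries the MTU and, depending on the mode, RFC, FCS,
 * EFS and extended window size options.  Returns the request length.
 */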
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003148static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003150 struct l2cap_conf_req *req = data;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003151 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152 void *ptr = req->data;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003153 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003155 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003157 if (chan->num_conf_req || chan->num_conf_rsp)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003158 goto done;
3159
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003160 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003161 case L2CAP_MODE_STREAMING:
3162 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003163 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003164 break;
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003165
Marcel Holtmann848566b2013-10-01 22:59:22 -07003166 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003167 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3168
Gustavo F. Padovan2ba13ed2010-06-09 16:39:05 -03003169 /* fall through */
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003170 default:
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003171 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003172 break;
3173 }
3174
3175done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003176 if (chan->imtu != L2CAP_DEFAULT_MTU)
3177 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovan7990681c2011-01-24 16:01:43 -02003178
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003179 switch (chan->mode) {
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003180 case L2CAP_MODE_BASIC:
Marcel Holtmann6fea7ad2014-07-09 11:53:35 +02003181 if (disable_ertm)
3182 break;
3183
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003184 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003185 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003186 break;
3187
Gustavo F. Padovan62547752010-06-08 20:05:31 -03003188 rfc.mode = L2CAP_MODE_BASIC;
3189 rfc.txwin_size = 0;
3190 rfc.max_transmit = 0;
3191 rfc.retrans_timeout = 0;
3192 rfc.monitor_timeout = 0;
3193 rfc.max_pdu_size = 0;
3194
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003195 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003196 (unsigned long) &rfc);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003197 break;
3198
3199 case L2CAP_MODE_ERTM:
3200 rfc.mode = L2CAP_MODE_ERTM;
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003201 rfc.max_transmit = chan->max_tx;
Mat Martineau36c86c82012-10-23 15:24:20 -07003202
3203 __l2cap_set_ertm_timeouts(chan, &rfc);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003204
3205 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003206 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3207 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003208 rfc.max_pdu_size = cpu_to_le16(size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003209
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003210 l2cap_txwin_setup(chan);
3211
3212 rfc.txwin_size = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003213 L2CAP_DEFAULT_TX_WINDOW);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003214
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003215 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003216 (unsigned long) &rfc);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003217
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003218 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3219 l2cap_add_opt_efs(&ptr, chan);
3220
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003221 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3222 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003223 chan->tx_win);
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003224
3225 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3226 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003227 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003228 chan->fcs = L2CAP_FCS_NONE;
3229 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3230 chan->fcs);
3231 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003232 break;
3233
3234 case L2CAP_MODE_STREAMING:
Mat Martineau273759e2012-05-17 20:53:53 -07003235 l2cap_txwin_setup(chan);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003236 rfc.mode = L2CAP_MODE_STREAMING;
3237 rfc.txwin_size = 0;
3238 rfc.max_transmit = 0;
3239 rfc.retrans_timeout = 0;
3240 rfc.monitor_timeout = 0;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003241
3242 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003243 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3244 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003245 rfc.max_pdu_size = cpu_to_le16(size);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003246
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003247 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003248 (unsigned long) &rfc);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003249
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003250 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3251 l2cap_add_opt_efs(&ptr, chan);
3252
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003253 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3254 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003255 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003256 chan->fcs = L2CAP_FCS_NONE;
3257 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3258 chan->fcs);
3259 }
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003260 break;
3261 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003263 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003264 req->flags = cpu_to_le16(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265
3266 return ptr - data;
3267}
3268
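/* Parse the peer's configuration request accumulated in chan->conf_req and
 * build our configuration response in data: negotiate mode, MTU, FCS, EFS
 * and extended window size.  Returns the response length, or -ECONNREFUSED
 * when the requested settings cannot be accepted at all.
 */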
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003269static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003270{
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003271 struct l2cap_conf_rsp *rsp = data;
3272 void *ptr = rsp->data;
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003273 void *req = chan->conf_req;
3274 int len = chan->conf_len;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003275 int type, hint, olen;
3276 unsigned long val;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003277 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003278 struct l2cap_conf_efs efs;
3279 u8 remote_efs = 0;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003280 u16 mtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003281 u16 result = L2CAP_CONF_SUCCESS;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003282 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003284 BT_DBG("chan %p", chan);
Marcel Holtmann820ae1b2006-11-18 22:15:00 +01003285
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003286 while (len >= L2CAP_CONF_OPT_SIZE) {
3287 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288
Gustavo F. Padovan589d2742009-04-20 01:31:07 -03003289 hint = type & L2CAP_CONF_HINT;
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07003290 type &= L2CAP_CONF_MASK;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003291
3292 switch (type) {
3293 case L2CAP_CONF_MTU:
Marcel Holtmann861d6882007-10-20 13:37:06 +02003294 mtu = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003295 break;
3296
3297 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003298 chan->flush_to = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003299 break;
3300
3301 case L2CAP_CONF_QOS:
3302 break;
3303
Marcel Holtmann6464f352007-10-20 13:39:51 +02003304 case L2CAP_CONF_RFC:
3305 if (olen == sizeof(rfc))
3306 memcpy(&rfc, (void *) val, olen);
3307 break;
3308
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003309 case L2CAP_CONF_FCS:
3310 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003311 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003312 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003313
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003314 case L2CAP_CONF_EFS:
3315 remote_efs = 1;
3316 if (olen == sizeof(efs))
3317 memcpy(&efs, (void *) val, olen);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003318 break;
3319
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003320 case L2CAP_CONF_EWS:
Marcel Holtmann848566b2013-10-01 22:59:22 -07003321 if (!chan->conn->hs_enabled)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003322 return -ECONNREFUSED;
3323
3324 set_bit(FLAG_EXT_CTRL, &chan->flags);
3325 set_bit(CONF_EWS_RECV, &chan->conf_state);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003326 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003327 chan->remote_tx_win = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003328 break;
3329
3330 default:
3331 if (hint)
3332 break;
3333
3334 result = L2CAP_CONF_UNKNOWN;
3335 *((u8 *) ptr++) = type;
3336 break;
3337 }
3338 }
3339
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003340 if (chan->num_conf_rsp || chan->num_conf_req > 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003341 goto done;
3342
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003343 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003344 case L2CAP_MODE_STREAMING:
3345 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003346 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003347 chan->mode = l2cap_select_mode(rfc.mode,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003348 chan->conn->feat_mask);
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003349 break;
3350 }
3351
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003352 if (remote_efs) {
Marcel Holtmann848566b2013-10-01 22:59:22 -07003353 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003354 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3355 else
3356 return -ECONNREFUSED;
3357 }
3358
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003359 if (chan->mode != rfc.mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003360 return -ECONNREFUSED;
Gustavo F. Padovan742e5192010-06-08 19:09:48 -03003361
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003362 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003363 }
3364
3365done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003366 if (chan->mode != rfc.mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003367 result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003368 rfc.mode = chan->mode;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003369
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003370 if (chan->num_conf_rsp == 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003371 return -ECONNREFUSED;
3372
Gustavo Padovan2d792812012-10-06 10:07:01 +01003373 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3374 (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003375 }
3376
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003377 if (result == L2CAP_CONF_SUCCESS) {
3378 /* Configure output options and let the other side know
3379 * which ones we don't like. */
3380
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003381 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3382 result = L2CAP_CONF_UNACCEPT;
3383 else {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003384 chan->omtu = mtu;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003385 set_bit(CONF_MTU_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003386 }
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003388
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003389 if (remote_efs) {
3390 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003391 efs.stype != L2CAP_SERV_NOTRAFIC &&
3392 efs.stype != chan->local_stype) {
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003393
3394 result = L2CAP_CONF_UNACCEPT;
3395
3396 if (chan->num_conf_req >= 1)
3397 return -ECONNREFUSED;
3398
3399 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003400 sizeof(efs),
3401 (unsigned long) &efs);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003402 } else {
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003403 /* Send PENDING Conf Rsp */
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003404 result = L2CAP_CONF_PENDING;
3405 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003406 }
3407 }
3408
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003409 switch (rfc.mode) {
3410 case L2CAP_MODE_BASIC:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003411 chan->fcs = L2CAP_FCS_NONE;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003412 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003413 break;
3414
3415 case L2CAP_MODE_ERTM:
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003416 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3417 chan->remote_tx_win = rfc.txwin_size;
3418 else
3419 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3420
Gustavo F. Padovan2c03a7a2011-03-25 20:15:28 -03003421 chan->remote_max_tx = rfc.max_transmit;
Mat Martineau86b1b262010-08-05 15:54:22 -07003422
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003423 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003424 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3425 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003426 rfc.max_pdu_size = cpu_to_le16(size);
3427 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003428
Mat Martineau36c86c82012-10-23 15:24:20 -07003429 __l2cap_set_ertm_timeouts(chan, &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003430
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003431 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003432
3433 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003434 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003435
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003436 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3437 chan->remote_id = efs.id;
3438 chan->remote_stype = efs.stype;
3439 chan->remote_msdu = le16_to_cpu(efs.msdu);
3440 chan->remote_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003441 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003442 chan->remote_acc_lat =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003443 le32_to_cpu(efs.acc_lat);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003444 chan->remote_sdu_itime =
3445 le32_to_cpu(efs.sdu_itime);
3446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003447 sizeof(efs),
3448 (unsigned long) &efs);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003449 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003450 break;
3451
3452 case L2CAP_MODE_STREAMING:
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003453 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003454 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3455 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003456 rfc.max_pdu_size = cpu_to_le16(size);
3457 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003458
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003459 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003460
Gustavo Padovan2d792812012-10-06 10:07:01 +01003461 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3462 (unsigned long) &rfc);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003463
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003464 break;
3465
3466 default:
Marcel Holtmann6464f352007-10-20 13:39:51 +02003467 result = L2CAP_CONF_UNACCEPT;
3468
3469 memset(&rfc, 0, sizeof(rfc));
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003470 rfc.mode = chan->mode;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003471 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003472
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003473 if (result == L2CAP_CONF_SUCCESS)
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003474 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003475 }
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003476 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003477 rsp->result = cpu_to_le16(result);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003478 rsp->flags = cpu_to_le16(0);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003479
3480 return ptr - data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481}
3482
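/* Walk the options of a configuration response, adopt the values the peer
 * adjusted (MTU, flush timeout, RFC timers, ack window, EFS) and rebuild a
 * configuration request reflecting them in data.  Returns the new request
 * length, or -ECONNREFUSED if the proposed mode cannot be used.
 */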
Gustavo Padovan2d792812012-10-06 10:07:01 +01003483static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3484 void *data, u16 *result)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003485{
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003486 struct l2cap_conf_req *req = data;
3487 void *ptr = req->data;
3488 int type, olen;
3489 unsigned long val;
Mat Martineau36e999a2011-12-08 17:23:21 -08003490 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003491 struct l2cap_conf_efs efs;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003492
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003493 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003494
3495 while (len >= L2CAP_CONF_OPT_SIZE) {
3496 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3497
3498 switch (type) {
3499 case L2CAP_CONF_MTU:
3500 if (val < L2CAP_DEFAULT_MIN_MTU) {
3501 *result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003502 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003503 } else
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003504 chan->imtu = val;
3505 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003506 break;
3507
3508 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003509 chan->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003511 2, chan->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003512 break;
3513
3514 case L2CAP_CONF_RFC:
3515 if (olen == sizeof(rfc))
3516 memcpy(&rfc, (void *)val, olen);
3517
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003518 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003519 rfc.mode != chan->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003520 return -ECONNREFUSED;
3521
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003522 chan->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003523
3524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003525 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003526 break;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003527
3528 case L2CAP_CONF_EWS:
Mat Martineauc20f8e32012-07-10 05:47:07 -07003529 chan->ack_win = min_t(u16, val, chan->ack_win);
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Mat Martineauc20f8e32012-07-10 05:47:07 -07003531 chan->tx_win);
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003532 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003533
3534 case L2CAP_CONF_EFS:
3535 if (olen == sizeof(efs))
3536 memcpy(&efs, (void *)val, olen);
3537
3538 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003539 efs.stype != L2CAP_SERV_NOTRAFIC &&
3540 efs.stype != chan->local_stype)
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003541 return -ECONNREFUSED;
3542
Gustavo Padovan2d792812012-10-06 10:07:01 +01003543 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3544 (unsigned long) &efs);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003545 break;
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003546
3547 case L2CAP_CONF_FCS:
3548 if (*result == L2CAP_CONF_PENDING)
3549 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003550 set_bit(CONF_RECV_NO_FCS,
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003551 &chan->conf_state);
3552 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003553 }
3554 }
3555
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003556 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003557 return -ECONNREFUSED;
3558
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003559 chan->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003560
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003561 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003562 switch (rfc.mode) {
3563 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003564 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3565 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3566 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003567 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3568 chan->ack_win = min_t(u16, chan->ack_win,
3569 rfc.txwin_size);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003570
3571 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3572 chan->local_msdu = le16_to_cpu(efs.msdu);
3573 chan->local_sdu_itime =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003574 le32_to_cpu(efs.sdu_itime);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003575 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3576 chan->local_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003577 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003578 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003579 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003580
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003581 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003582 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003583 }
3584 }
3585
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003586 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003587 req->flags = cpu_to_le16(0);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003588
3589 return ptr - data;
3590}
3591
Gustavo Padovan2d792812012-10-06 10:07:01 +01003592static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3593 u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594{
3595 struct l2cap_conf_rsp *rsp = data;
3596 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003598 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003600 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003601 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003602 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603
3604 return ptr - data;
3605}
3606
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003607void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3608{
3609 struct l2cap_le_conn_rsp rsp;
3610 struct l2cap_conn *conn = chan->conn;
3611
3612 BT_DBG("chan %p", chan);
3613
3614 rsp.dcid = cpu_to_le16(chan->scid);
3615 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02003616 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03003617 rsp.credits = cpu_to_le16(chan->rx_credits);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003618 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003619
3620 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3621 &rsp);
3622}
3623
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003624void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003625{
3626 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003627 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003628 u8 buf[128];
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003629 u8 rsp_code;
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003630
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003631 rsp.scid = cpu_to_le16(chan->dcid);
3632 rsp.dcid = cpu_to_le16(chan->scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003633 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3634 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003635
3636 if (chan->hs_hcon)
3637 rsp_code = L2CAP_CREATE_CHAN_RSP;
3638 else
3639 rsp_code = L2CAP_CONN_RSP;
3640
3641 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3642
3643 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003644
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003645 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003646 return;
3647
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003648 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003649 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003650 chan->num_conf_req++;
3651}
3652
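/* Extract the RFC (and extended window size) parameters from a successful
 * configuration response and record the resulting retransmission and
 * monitor timeouts, MPS and ack window on the channel, using sane defaults
 * when the peer omitted the options.
 */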
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003653static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003654{
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003655 int type, olen;
3656 unsigned long val;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003657 /* Use sane default values in case a misbehaving remote device
3658 * did not send an RFC or extended window size option.
3659 */
3660 u16 txwin_ext = chan->ack_win;
3661 struct l2cap_conf_rfc rfc = {
3662 .mode = chan->mode,
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003663 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3664 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
Mat Martineauc20f8e32012-07-10 05:47:07 -07003665 .max_pdu_size = cpu_to_le16(chan->imtu),
3666 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3667 };
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003668
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003669 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003670
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003671 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003672 return;
3673
3674 while (len >= L2CAP_CONF_OPT_SIZE) {
3675 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3676
Mat Martineauc20f8e32012-07-10 05:47:07 -07003677 switch (type) {
3678 case L2CAP_CONF_RFC:
3679 if (olen == sizeof(rfc))
3680 memcpy(&rfc, (void *)val, olen);
Szymon Janc8f321f82012-06-08 11:33:33 +02003681 break;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003682 case L2CAP_CONF_EWS:
3683 txwin_ext = val;
3684 break;
3685 }
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003686 }
3687
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003688 switch (rfc.mode) {
3689 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003690 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3691 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003692 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3693 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3694 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3695 else
3696 chan->ack_win = min_t(u16, chan->ack_win,
3697 rfc.txwin_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003698 break;
3699 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003700 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003701 }
3702}
3703
Gustavo Padovan2d792812012-10-06 10:07:01 +01003704static inline int l2cap_command_rej(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003705 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3706 u8 *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003707{
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003708 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003709
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003710 if (cmd_len < sizeof(*rej))
3711 return -EPROTO;
3712
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003713 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003714 return 0;
3715
3716 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003717 cmd->ident == conn->info_ident) {
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02003718 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003719
3720 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003721 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003722
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003723 l2cap_conn_start(conn);
3724 }
3725
3726 return 0;
3727}
3728
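/* Common handler for Connection Request and Create Channel Request:
 * look up a listening channel for the PSM, run the security checks,
 * allocate the new channel and reply with rsp_code carrying a
 * success, pending or rejection result.
 */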
Mat Martineau17009152012-10-23 15:24:07 -07003729static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3730 struct l2cap_cmd_hdr *cmd,
3731 u8 *data, u8 rsp_code, u8 amp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3734 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003735 struct l2cap_chan *chan = NULL, *pchan;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003736 int result, status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737
3738 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003739 __le16 psm = req->psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740
Andrei Emeltchenko097db762012-03-09 14:16:17 +02003741 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742
3743	/* Check if we have a socket listening on this psm */
Marcel Holtmann6f59b902013-10-13 05:24:01 -07003744 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03003745 &conn->hcon->dst, ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003746 if (!pchan) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 result = L2CAP_CR_BAD_PSM;
3748 goto sendresp;
3749 }
3750
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003751 mutex_lock(&conn->chan_lock);
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003752 l2cap_chan_lock(pchan);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00003753
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003754 /* Check if the ACL is secure enough (if not SDP) */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003755 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003756 !hci_conn_check_link_mode(conn->hcon)) {
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02003757 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003758 result = L2CAP_CR_SEC_BLOCK;
3759 goto response;
3760 }
3761
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 result = L2CAP_CR_NO_MEM;
3763
Gustavo Padovan2dfa1002012-05-27 22:27:58 -03003764	/* Check if we already have a channel with that dcid */
3765 if (__l2cap_get_chan_by_dcid(conn, scid))
3766 goto response;
3767
Gustavo Padovan80b98022012-05-27 22:27:51 -03003768 chan = pchan->ops->new_connection(pchan);
Gustavo F. Padovan80808e42011-05-16 17:24:37 -03003769 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 goto response;
3771
Syam Sidhardhan330b6c12013-08-06 01:59:12 +09003772 /* For certain devices (ex: HID mouse), support for authentication,
3773	 * pairing and bonding is optional. For such devices, in order to avoid
3774	 * keeping the ACL alive for too long after L2CAP disconnection, reset the ACL
3775 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3776 */
3777 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3778
Marcel Holtmann7eafc592013-10-13 08:12:47 -07003779 bacpy(&chan->src, &conn->hcon->src);
3780 bacpy(&chan->dst, &conn->hcon->dst);
Marcel Holtmann4f1654e2013-10-13 08:50:41 -07003781 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3782 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003783 chan->psm = psm;
3784 chan->dcid = scid;
Mat Martineau17009152012-10-23 15:24:07 -07003785 chan->local_amp_id = amp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003787 __l2cap_chan_add(conn, chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003788
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003789 dcid = chan->scid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790
Gustavo Padovan8d836d72013-10-15 19:24:47 -03003791 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003793 chan->ident = cmd->ident;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794
Marcel Holtmann984947d2009-02-06 23:35:19 +01003795 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03003796 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07003797 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003798 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003799 result = L2CAP_CR_PEND;
3800 status = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08003801 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003802 } else {
Mat Martineau17009152012-10-23 15:24:07 -07003803 /* Force pending result for AMP controllers.
3804 * The connection will succeed after the
3805 * physical link is up.
3806 */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003807 if (amp_id == AMP_ID_BREDR) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003808 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau17009152012-10-23 15:24:07 -07003809 result = L2CAP_CR_SUCCESS;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003810 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003811 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003812 result = L2CAP_CR_PEND;
Mat Martineau17009152012-10-23 15:24:07 -07003813 }
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003814 status = L2CAP_CS_NO_INFO;
3815 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003816 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003817 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003818 result = L2CAP_CR_PEND;
3819 status = L2CAP_CS_AUTHEN_PEND;
3820 }
3821 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003822 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003823 result = L2CAP_CR_PEND;
3824 status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 }
3826
Linus Torvalds1da177e2005-04-16 15:20:36 -07003827response:
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003828 l2cap_chan_unlock(pchan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003829 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03003830 l2cap_chan_put(pchan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831
3832sendresp:
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003833 rsp.scid = cpu_to_le16(scid);
3834 rsp.dcid = cpu_to_le16(dcid);
3835 rsp.result = cpu_to_le16(result);
3836 rsp.status = cpu_to_le16(status);
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003837 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003838
3839 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3840 struct l2cap_info_req info;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003841 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003842
3843 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3844 conn->info_ident = l2cap_get_ident(conn);
3845
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08003846 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003847
Gustavo Padovan2d792812012-10-06 10:07:01 +01003848 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3849 sizeof(info), &info);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003850 }
3851
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003852 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003853 result == L2CAP_CR_SUCCESS) {
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003854 u8 buf[128];
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003855 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003856 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003857 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003858 chan->num_conf_req++;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003859 }
Mat Martineau17009152012-10-23 15:24:07 -07003860
3861 return chan;
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003862}
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003863
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003864static int l2cap_connect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003865 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003866{
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303867 struct hci_dev *hdev = conn->hcon->hdev;
3868 struct hci_conn *hcon = conn->hcon;
3869
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003870 if (cmd_len < sizeof(struct l2cap_conn_req))
3871 return -EPROTO;
3872
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303873 hci_dev_lock(hdev);
3874 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3875 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3876 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3877 hcon->dst_type, 0, NULL, 0,
3878 hcon->dev_class);
3879 hci_dev_unlock(hdev);
3880
Gustavo Padovan300229f2012-10-12 19:40:40 +08003881 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882 return 0;
3883}
3884
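/* Handle a Connection Response or Create Channel Response: on success
 * move the channel to BT_CONFIG and send the first Configure Request,
 * on a pending result just mark the channel, and on any other result
 * delete the channel.
 */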
Mat Martineau5909cf32012-10-23 15:24:08 -07003885static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003886 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3887 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003888{
3889 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3890 u16 scid, dcid, result, status;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003891 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892 u8 req[128];
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003893 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003895 if (cmd_len < sizeof(*rsp))
3896 return -EPROTO;
3897
Linus Torvalds1da177e2005-04-16 15:20:36 -07003898 scid = __le16_to_cpu(rsp->scid);
3899 dcid = __le16_to_cpu(rsp->dcid);
3900 result = __le16_to_cpu(rsp->result);
3901 status = __le16_to_cpu(rsp->status);
3902
Andrei Emeltchenko1b009c92012-02-21 12:54:54 +02003903 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01003904 dcid, scid, result, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003906 mutex_lock(&conn->chan_lock);
3907
Linus Torvalds1da177e2005-04-16 15:20:36 -07003908 if (scid) {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003909 chan = __l2cap_get_chan_by_scid(conn, scid);
3910 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003911 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003912 goto unlock;
3913 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003914 } else {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003915 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3916 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003917 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003918 goto unlock;
3919 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003920 }
3921
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003922 err = 0;
3923
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003924 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003925
Linus Torvalds1da177e2005-04-16 15:20:36 -07003926 switch (result) {
3927 case L2CAP_CR_SUCCESS:
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03003928 l2cap_state_change(chan, BT_CONFIG);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003929 chan->ident = 0;
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003930 chan->dcid = dcid;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003931 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01003932
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003933 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003934 break;
3935
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003937 l2cap_build_conf_req(chan, req), req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003938 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003939 break;
3940
3941 case L2CAP_CR_PEND:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003942 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003943 break;
3944
3945 default:
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003946 l2cap_chan_del(chan, ECONNREFUSED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003947 break;
3948 }
3949
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003950 l2cap_chan_unlock(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003951
3952unlock:
3953 mutex_unlock(&conn->chan_lock);
3954
3955 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956}
3957
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003958static inline void set_default_fcs(struct l2cap_chan *chan)
Mat Martineau8c462b62010-08-24 15:35:42 -07003959{
3960 /* FCS is enabled only in ERTM or streaming mode, if one or both
3961 * sides request it.
3962 */
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003963 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003964 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003965 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003966 chan->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07003967}
3968
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03003969static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3970 u8 ident, u16 flags)
3971{
3972 struct l2cap_conn *conn = chan->conn;
3973
3974 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3975 flags);
3976
3977 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3978 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3979
3980 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3981 l2cap_build_conf_rsp(chan, data,
3982 L2CAP_CONF_SUCCESS, flags), data);
3983}
3984
Johan Hedberg662d6522013-10-16 11:20:47 +03003985static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3986 u16 scid, u16 dcid)
3987{
3988 struct l2cap_cmd_rej_cid rej;
3989
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003990 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
Johan Hedberg662d6522013-10-16 11:20:47 +03003991 rej.scid = __cpu_to_le16(scid);
3992 rej.dcid = __cpu_to_le16(dcid);
3993
3994 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3995}
3996
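/* Handle an incoming Configure Request: accumulate the options (they
 * may be split across several requests), parse them once complete,
 * send the Configure Response and, when both directions are done,
 * initialize ERTM/streaming state and mark the channel ready.
 */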
Gustavo Padovan2d792812012-10-06 10:07:01 +01003997static inline int l2cap_config_req(struct l2cap_conn *conn,
3998 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3999 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000{
4001 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4002 u16 dcid, flags;
4003 u8 rsp[64];
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004004 struct l2cap_chan *chan;
Mat Martineau3c588192012-04-11 10:48:42 -07004005 int len, err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004006
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004007 if (cmd_len < sizeof(*req))
4008 return -EPROTO;
4009
Linus Torvalds1da177e2005-04-16 15:20:36 -07004010 dcid = __le16_to_cpu(req->dcid);
4011 flags = __le16_to_cpu(req->flags);
4012
4013 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4014
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004015 chan = l2cap_get_chan_by_scid(conn, dcid);
Johan Hedberg662d6522013-10-16 11:20:47 +03004016 if (!chan) {
4017 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4018 return 0;
4019 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004020
David S. Miller033b1142011-07-21 13:38:42 -07004021 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
Johan Hedberg662d6522013-10-16 11:20:47 +03004022 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4023 chan->dcid);
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004024 goto unlock;
Gustavo F. Padovandf6bd742010-06-14 02:26:15 -03004025 }
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004026
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004027 /* Reject if config buffer is too small. */
Al Viro88219a02007-07-29 00:17:25 -07004028 len = cmd_len - sizeof(*req);
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004029 if (chan->conf_len + len > sizeof(chan->conf_req)) {
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004030 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004031 l2cap_build_conf_rsp(chan, rsp,
4032 L2CAP_CONF_REJECT, flags), rsp);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004033 goto unlock;
4034 }
4035
4036 /* Store config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004037 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4038 chan->conf_len += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004040 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041 /* Incomplete config. Send empty response. */
4042 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004043 l2cap_build_conf_rsp(chan, rsp,
4044 L2CAP_CONF_SUCCESS, flags), rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004045 goto unlock;
4046 }
4047
4048 /* Complete config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004049 len = l2cap_parse_conf_req(chan, rsp);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004050 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004051 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052 goto unlock;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004053 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054
Mat Martineau1500109b2012-10-23 15:24:15 -07004055 chan->ident = cmd->ident;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004056 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004057 chan->num_conf_rsp++;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004058
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004059 /* Reset config buffer. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004060 chan->conf_len = 0;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004061
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004062 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
Marcel Holtmann876d9482007-10-20 13:35:42 +02004063 goto unlock;
4064
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004065 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004066 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004067
Mat Martineau105bdf92012-04-27 16:50:48 -07004068 if (chan->mode == L2CAP_MODE_ERTM ||
4069 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004070 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004071
Mat Martineau3c588192012-04-11 10:48:42 -07004072 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004073 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004074 else
4075 l2cap_chan_ready(chan);
4076
Marcel Holtmann876d9482007-10-20 13:35:42 +02004077 goto unlock;
4078 }
4079
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004080 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004081 u8 buf[64];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004082 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004083 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004084 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004085 }
4086
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004087	/* Got Conf Rsp PENDING from remote side and assume we sent
4088 Conf Rsp PENDING in the code above */
4089 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03004090 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004091
4092 /* check compatibility */
4093
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004094 /* Send rsp for BR/EDR channel */
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004095 if (!chan->hs_hcon)
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004096 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4097 else
4098 chan->ident = cmd->ident;
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004099 }
4100
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101unlock:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004102 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004103 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004104}
4105
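/* Handle an incoming Configure Response: on success record the
 * negotiated RFC options, on a pending result complete the EFS/AMP
 * handshake, on unaccepted parameters build and send a new request,
 * and on any other result disconnect the channel.
 */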
Gustavo Padovan2d792812012-10-06 10:07:01 +01004106static inline int l2cap_config_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004107 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4108 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004109{
4110 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4111 u16 scid, flags, result;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004112 struct l2cap_chan *chan;
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004113 int len = cmd_len - sizeof(*rsp);
Mat Martineau3c588192012-04-11 10:48:42 -07004114 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004115
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004116 if (cmd_len < sizeof(*rsp))
4117 return -EPROTO;
4118
Linus Torvalds1da177e2005-04-16 15:20:36 -07004119 scid = __le16_to_cpu(rsp->scid);
4120 flags = __le16_to_cpu(rsp->flags);
4121 result = __le16_to_cpu(rsp->result);
4122
Andrei Emeltchenko61386cb2012-03-12 12:13:07 +02004123 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4124 result, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004126 chan = l2cap_get_chan_by_scid(conn, scid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004127 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004128 return 0;
4129
4130 switch (result) {
4131 case L2CAP_CONF_SUCCESS:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004132 l2cap_conf_rfc_get(chan, rsp->data, len);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004133 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004134 break;
4135
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004136 case L2CAP_CONF_PENDING:
4137 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4138
4139 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4140 char buf[64];
4141
4142 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004143 buf, &result);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004144 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004145 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004146 goto done;
4147 }
4148
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004149 if (!chan->hs_hcon) {
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004150 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4151 0);
Andrei Emeltchenko5ce66b52012-10-31 15:46:30 +02004152 } else {
4153 if (l2cap_check_efs(chan)) {
4154 amp_create_logical_link(chan);
4155 chan->ident = cmd->ident;
4156 }
4157 }
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004158 }
4159 goto done;
4160
Linus Torvalds1da177e2005-04-16 15:20:36 -07004161 case L2CAP_CONF_UNACCEPT:
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004162 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004163 char req[64];
4164
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004165 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004166 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004167 goto done;
4168 }
4169
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004170 /* throw out any old stored conf requests */
4171 result = L2CAP_CONF_SUCCESS;
Gustavo F. Padovanb4450032011-04-12 18:15:09 -03004172 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004173 req, &result);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004174 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004175 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004176 goto done;
4177 }
4178
4179 l2cap_send_cmd(conn, l2cap_get_ident(conn),
Gustavo Padovan2d792812012-10-06 10:07:01 +01004180 L2CAP_CONF_REQ, len, req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004181 chan->num_conf_req++;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004182 if (result != L2CAP_CONF_SUCCESS)
4183 goto done;
4184 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 }
4186
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004187 default:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004188 l2cap_chan_set_err(chan, ECONNRESET);
Andrei Emeltchenko2e0052e2012-02-21 12:54:58 +02004189
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08004190 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004191 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004192 goto done;
4193 }
4194
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004195 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 goto done;
4197
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004198 set_bit(CONF_INPUT_DONE, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004199
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004200 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004201 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004202
Mat Martineau105bdf92012-04-27 16:50:48 -07004203 if (chan->mode == L2CAP_MODE_ERTM ||
4204 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004205 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004206
Mat Martineau3c588192012-04-11 10:48:42 -07004207 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004208 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004209 else
4210 l2cap_chan_ready(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211 }
4212
4213done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004214 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004215 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004216}
4217
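/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response and tear down the matching channel.
 */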
Gustavo Padovan2d792812012-10-06 10:07:01 +01004218static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004219 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4220 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004221{
4222 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4223 struct l2cap_disconn_rsp rsp;
4224 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004225 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004226
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004227 if (cmd_len != sizeof(*req))
4228 return -EPROTO;
4229
Linus Torvalds1da177e2005-04-16 15:20:36 -07004230 scid = __le16_to_cpu(req->scid);
4231 dcid = __le16_to_cpu(req->dcid);
4232
4233 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4234
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004235 mutex_lock(&conn->chan_lock);
4236
4237 chan = __l2cap_get_chan_by_scid(conn, dcid);
4238 if (!chan) {
4239 mutex_unlock(&conn->chan_lock);
Johan Hedberg662d6522013-10-16 11:20:47 +03004240 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4241 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004242 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004243
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004244 l2cap_chan_lock(chan);
4245
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03004246 rsp.dcid = cpu_to_le16(chan->scid);
4247 rsp.scid = cpu_to_le16(chan->dcid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4249
Gustavo Padovan5ec1bbe2013-10-15 19:24:48 -03004250 chan->ops->set_shutdown(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004251
Mat Martineau61d6ef32012-04-27 16:50:50 -07004252 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004253 l2cap_chan_del(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004254
4255 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004256
Gustavo Padovan80b98022012-05-27 22:27:51 -03004257 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004258 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004259
4260 mutex_unlock(&conn->chan_lock);
4261
Linus Torvalds1da177e2005-04-16 15:20:36 -07004262 return 0;
4263}
4264
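/* Handle an incoming Disconnection Response: the remote side has
 * confirmed our disconnect, so remove and close the local channel.
 */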
Gustavo Padovan2d792812012-10-06 10:07:01 +01004265static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004266 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4267 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004268{
4269 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4270 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004271 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004272
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004273 if (cmd_len != sizeof(*rsp))
4274 return -EPROTO;
4275
Linus Torvalds1da177e2005-04-16 15:20:36 -07004276 scid = __le16_to_cpu(rsp->scid);
4277 dcid = __le16_to_cpu(rsp->dcid);
4278
4279 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4280
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004281 mutex_lock(&conn->chan_lock);
4282
4283 chan = __l2cap_get_chan_by_scid(conn, scid);
4284 if (!chan) {
4285 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004287 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004289 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004290
Mat Martineau61d6ef32012-04-27 16:50:50 -07004291 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004292 l2cap_chan_del(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004293
4294 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004295
Gustavo Padovan80b98022012-05-27 22:27:51 -03004296 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004297 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004298
4299 mutex_unlock(&conn->chan_lock);
4300
Linus Torvalds1da177e2005-04-16 15:20:36 -07004301 return 0;
4302}
4303
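/* Handle an incoming Information Request by reporting the local
 * feature mask or fixed channel map; any other type is rejected as
 * not supported.
 */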
Gustavo Padovan2d792812012-10-06 10:07:01 +01004304static inline int l2cap_information_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004305 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4306 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307{
4308 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309 u16 type;
4310
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004311 if (cmd_len != sizeof(*req))
4312 return -EPROTO;
4313
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 type = __le16_to_cpu(req->type);
4315
4316 BT_DBG("type 0x%4.4x", type);
4317
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004318 if (type == L2CAP_IT_FEAT_MASK) {
4319 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004320 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004321 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004322 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4323 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004324 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004325 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Gustavo Padovan2d792812012-10-06 10:07:01 +01004326 | L2CAP_FEAT_FCS;
Marcel Holtmann848566b2013-10-01 22:59:22 -07004327 if (conn->hs_enabled)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03004328 feat_mask |= L2CAP_FEAT_EXT_FLOW
Gustavo Padovan2d792812012-10-06 10:07:01 +01004329 | L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenkoa5fd6f32011-09-16 16:26:32 +03004330
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004331 put_unaligned_le32(feat_mask, rsp->data);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004332 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4333 buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004334 } else if (type == L2CAP_IT_FIXED_CHAN) {
4335 u8 buf[12];
4336 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Mat Martineau50a147c2011-11-02 16:18:34 -07004337
Marcel Holtmann848566b2013-10-01 22:59:22 -07004338 if (conn->hs_enabled)
Mat Martineau50a147c2011-11-02 16:18:34 -07004339 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4340 else
4341 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4342
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004343 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4344 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Andrei Emeltchenkoc6337ea2011-10-20 17:02:44 +03004345 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
Gustavo Padovan2d792812012-10-06 10:07:01 +01004346 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4347 buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004348 } else {
4349 struct l2cap_info_rsp rsp;
4350 rsp.type = cpu_to_le16(type);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004351 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004352 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4353 &rsp);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004354 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004355
4356 return 0;
4357}
4358
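/* Handle an incoming Information Response: store the remote feature
 * mask, query the fixed channels next if the remote supports them,
 * and finally start the channels waiting on this exchange.
 */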
Gustavo Padovan2d792812012-10-06 10:07:01 +01004359static inline int l2cap_information_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004360 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4361 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362{
4363 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4364 u16 type, result;
4365
Jaganath Kanakkassery3f6fa3d2013-06-21 19:55:11 +05304366 if (cmd_len < sizeof(*rsp))
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004367 return -EPROTO;
4368
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369 type = __le16_to_cpu(rsp->type);
4370 result = __le16_to_cpu(rsp->result);
4371
4372 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4373
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004374	/* L2CAP Info req/rsp are not bound to a channel, so add extra checks */
4375 if (cmd->ident != conn->info_ident ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01004376 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004377 return 0;
4378
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02004379 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004380
Ville Tervoadb08ed2010-08-04 09:43:33 +03004381 if (result != L2CAP_IR_SUCCESS) {
4382 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4383 conn->info_ident = 0;
4384
4385 l2cap_conn_start(conn);
4386
4387 return 0;
4388 }
4389
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004390 switch (type) {
4391 case L2CAP_IT_FEAT_MASK:
Harvey Harrison83985312008-05-02 16:25:46 -07004392 conn->feat_mask = get_unaligned_le32(rsp->data);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004393
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07004394 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004395 struct l2cap_info_req req;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004396 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004397
4398 conn->info_ident = l2cap_get_ident(conn);
4399
4400 l2cap_send_cmd(conn, conn->info_ident,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004401 L2CAP_INFO_REQ, sizeof(req), &req);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004402 } else {
4403 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4404 conn->info_ident = 0;
4405
4406 l2cap_conn_start(conn);
4407 }
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004408 break;
4409
4410 case L2CAP_IT_FIXED_CHAN:
4411 conn->fixed_chan_mask = rsp->data[0];
Marcel Holtmann984947d2009-02-06 23:35:19 +01004412 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004413 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004414
4415 l2cap_conn_start(conn);
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004416 break;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004417 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004418
Linus Torvalds1da177e2005-04-16 15:20:36 -07004419 return 0;
4420}
4421
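/* Handle an incoming Create Channel Request: for the BR/EDR
 * controller id reuse the normal connect path, otherwise validate the
 * requested AMP controller and tie the new channel to its high-speed
 * link.
 */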
Mat Martineau17009152012-10-23 15:24:07 -07004422static int l2cap_create_channel_req(struct l2cap_conn *conn,
4423 struct l2cap_cmd_hdr *cmd,
4424 u16 cmd_len, void *data)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004425{
4426 struct l2cap_create_chan_req *req = data;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004427 struct l2cap_create_chan_rsp rsp;
Mat Martineau17009152012-10-23 15:24:07 -07004428 struct l2cap_chan *chan;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004429 struct hci_dev *hdev;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004430 u16 psm, scid;
4431
4432 if (cmd_len != sizeof(*req))
4433 return -EPROTO;
4434
Marcel Holtmann848566b2013-10-01 22:59:22 -07004435 if (!conn->hs_enabled)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004436 return -EINVAL;
4437
4438 psm = le16_to_cpu(req->psm);
4439 scid = le16_to_cpu(req->scid);
4440
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004441 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004442
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004443	/* For controller id 0, make a BR/EDR connection */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004444 if (req->amp_id == AMP_ID_BREDR) {
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004445 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4446 req->amp_id);
4447 return 0;
4448 }
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004449
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004450 /* Validate AMP controller id */
4451 hdev = hci_dev_get(req->amp_id);
4452 if (!hdev)
4453 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004454
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004455 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
Mat Martineau17009152012-10-23 15:24:07 -07004456 hci_dev_put(hdev);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004457 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004458 }
4459
4460 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4461 req->amp_id);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004462 if (chan) {
4463 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4464 struct hci_conn *hs_hcon;
4465
Marcel Holtmann98e0f7e2013-10-13 02:23:40 -07004466 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4467 &conn->hcon->dst);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004468 if (!hs_hcon) {
4469 hci_dev_put(hdev);
Johan Hedberg662d6522013-10-16 11:20:47 +03004470 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4471 chan->dcid);
4472 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004473 }
4474
4475 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4476
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004477 mgr->bredr_chan = chan;
4478 chan->hs_hcon = hs_hcon;
Andrei Emeltchenkofd45bf42012-11-20 17:16:22 +02004479 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004480 conn->mtu = hdev->block_mtu;
4481 }
4482
4483 hci_dev_put(hdev);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004484
4485 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004486
4487error:
4488 rsp.dcid = 0;
4489 rsp.scid = cpu_to_le16(scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004490 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4491 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004492
4493 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4494 sizeof(rsp), &rsp);
4495
Johan Hedbergdc280802013-09-16 13:05:13 +03004496 return 0;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004497}
4498
Mat Martineau8eb200b2012-10-23 15:24:17 -07004499static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4500{
4501 struct l2cap_move_chan_req req;
4502 u8 ident;
4503
4504 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4505
4506 ident = l2cap_get_ident(chan->conn);
4507 chan->ident = ident;
4508
4509 req.icid = cpu_to_le16(chan->scid);
4510 req.dest_amp_id = dest_amp_id;
4511
4512 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4513 &req);
4514
4515 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4516}
4517
Mat Martineau1500109b2012-10-23 15:24:15 -07004518static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004519{
4520 struct l2cap_move_chan_rsp rsp;
4521
Mat Martineau1500109b2012-10-23 15:24:15 -07004522 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004523
Mat Martineau1500109b2012-10-23 15:24:15 -07004524 rsp.icid = cpu_to_le16(chan->dcid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004525 rsp.result = cpu_to_le16(result);
4526
Mat Martineau1500109b2012-10-23 15:24:15 -07004527 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4528 sizeof(rsp), &rsp);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004529}
4530
Mat Martineau5b155ef2012-10-23 15:24:14 -07004531static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004532{
4533 struct l2cap_move_chan_cfm cfm;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004534
Mat Martineau5b155ef2012-10-23 15:24:14 -07004535 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004536
Mat Martineau5b155ef2012-10-23 15:24:14 -07004537 chan->ident = l2cap_get_ident(chan->conn);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004538
Mat Martineau5b155ef2012-10-23 15:24:14 -07004539 cfm.icid = cpu_to_le16(chan->scid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004540 cfm.result = cpu_to_le16(result);
4541
Mat Martineau5b155ef2012-10-23 15:24:14 -07004542 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4543 sizeof(cfm), &cfm);
4544
4545 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4546}
4547
4548static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4549{
4550 struct l2cap_move_chan_cfm cfm;
4551
4552 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4553
4554 cfm.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004555 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
Mat Martineau5b155ef2012-10-23 15:24:14 -07004556
4557 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4558 sizeof(cfm), &cfm);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004559}
4560
4561static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004562 u16 icid)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004563{
4564 struct l2cap_move_chan_cfm_rsp rsp;
4565
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004566 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004567
4568 rsp.icid = cpu_to_le16(icid);
4569 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4570}
4571
Mat Martineau5f3847a2012-10-23 15:24:12 -07004572static void __release_logical_link(struct l2cap_chan *chan)
4573{
4574 chan->hs_hchan = NULL;
4575 chan->hs_hcon = NULL;
4576
4577 /* Placeholder - release the logical link */
4578}
4579
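/* Logical link setup for an AMP channel failed: abort the channel
 * creation or unwind the pending channel move.
 */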
Mat Martineau1500109b2012-10-23 15:24:15 -07004580static void l2cap_logical_fail(struct l2cap_chan *chan)
4581{
4582 /* Logical link setup failed */
4583 if (chan->state != BT_CONNECTED) {
4584 /* Create channel failure, disconnect */
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004585 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineau1500109b2012-10-23 15:24:15 -07004586 return;
4587 }
4588
4589 switch (chan->move_role) {
4590 case L2CAP_MOVE_ROLE_RESPONDER:
4591 l2cap_move_done(chan);
4592 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4593 break;
4594 case L2CAP_MOVE_ROLE_INITIATOR:
4595 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4596 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4597 /* Remote has only sent pending or
4598 * success responses, clean up
4599 */
4600 l2cap_move_done(chan);
4601 }
4602
4603		/* Other AMP move states imply that the move
4604		 * has already been aborted
4605 */
4606 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4607 break;
4608 }
4609}
4610
4611static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4612 struct hci_chan *hchan)
4613{
4614 struct l2cap_conf_rsp rsp;
Mat Martineau1500109b2012-10-23 15:24:15 -07004615
Andrei Emeltchenko336178a2012-10-31 15:46:27 +02004616 chan->hs_hchan = hchan;
Mat Martineau1500109b2012-10-23 15:24:15 -07004617 chan->hs_hcon->l2cap_data = chan->conn;
4618
Andrei Emeltchenko35ba9562012-10-25 15:20:43 +03004619 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
Mat Martineau1500109b2012-10-23 15:24:15 -07004620
4621 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Andrei Emeltchenkofe79c6f2012-10-25 15:20:42 +03004622 int err;
Mat Martineau1500109b2012-10-23 15:24:15 -07004623
4624 set_default_fcs(chan);
4625
4626 err = l2cap_ertm_init(chan);
4627 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004628 l2cap_send_disconn_req(chan, -err);
Mat Martineau1500109b2012-10-23 15:24:15 -07004629 else
4630 l2cap_chan_ready(chan);
4631 }
4632}
4633
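/* A logical link needed for a channel move is now up: advance the
 * move state machine and send the matching move response or confirm.
 */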
4634static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4635 struct hci_chan *hchan)
4636{
4637 chan->hs_hcon = hchan->conn;
4638 chan->hs_hcon->l2cap_data = chan->conn;
4639
4640 BT_DBG("move_state %d", chan->move_state);
4641
4642 switch (chan->move_state) {
4643 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4644 /* Move confirm will be sent after a success
4645 * response is received
4646 */
4647 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4648 break;
4649 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4650 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4651 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4652 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4653 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4654 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4655 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4656 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4657 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4658 }
4659 break;
4660 default:
4661 /* Move was not in expected state, free the channel */
4662 __release_logical_link(chan);
4663
4664 chan->move_state = L2CAP_MOVE_STABLE;
4665 }
4666}
4667
4668/* Call with chan locked */
Andrei Emeltchenko27695fb2012-10-25 15:20:45 +03004669void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4670 u8 status)
Mat Martineau5b155ef2012-10-23 15:24:14 -07004671{
Mat Martineau1500109b2012-10-23 15:24:15 -07004672 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4673
4674 if (status) {
4675 l2cap_logical_fail(chan);
4676 __release_logical_link(chan);
4677 return;
4678 }
4679
4680 if (chan->state != BT_CONNECTED) {
4681 /* Ignore logical link if channel is on BR/EDR */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004682 if (chan->local_amp_id != AMP_ID_BREDR)
Mat Martineau1500109b2012-10-23 15:24:15 -07004683 l2cap_logical_finish_create(chan, hchan);
4684 } else {
4685 l2cap_logical_finish_move(chan, hchan);
4686 }
Mat Martineau5b155ef2012-10-23 15:24:14 -07004687}
4688
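/* Start moving a connected channel to an AMP controller, or back to
 * BR/EDR, depending on where the channel currently resides.
 */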
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004689void l2cap_move_start(struct l2cap_chan *chan)
4690{
4691 BT_DBG("chan %p", chan);
4692
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004693 if (chan->local_amp_id == AMP_ID_BREDR) {
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004694 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4695 return;
4696 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4697 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4698 /* Placeholder - start physical link setup */
4699 } else {
4700 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4701 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4702 chan->move_id = 0;
4703 l2cap_move_setup(chan);
4704 l2cap_send_move_chan_req(chan, 0);
4705 }
4706}
4707
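/* The AMP physical link outcome is known for a channel being created:
 * either send the Create Channel Request (outgoing) or answer the
 * pending one (incoming); failed outgoing channels fall back to a
 * plain BR/EDR connection.
 */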
Mat Martineau8eb200b2012-10-23 15:24:17 -07004708static void l2cap_do_create(struct l2cap_chan *chan, int result,
4709 u8 local_amp_id, u8 remote_amp_id)
4710{
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004711 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4712 local_amp_id, remote_amp_id);
4713
Andrei Emeltchenko12d6cc62012-11-14 17:39:31 +02004714 chan->fcs = L2CAP_FCS_NONE;
4715
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004716 /* Outgoing channel on AMP */
4717 if (chan->state == BT_CONNECT) {
4718 if (result == L2CAP_CR_SUCCESS) {
4719 chan->local_amp_id = local_amp_id;
4720 l2cap_send_create_chan_req(chan, remote_amp_id);
4721 } else {
4722 /* Revert to BR/EDR connect */
4723 l2cap_send_conn_req(chan);
4724 }
4725
4726 return;
4727 }
4728
4729 /* Incoming channel on AMP */
4730 if (__l2cap_no_conn_pending(chan)) {
Mat Martineau8eb200b2012-10-23 15:24:17 -07004731 struct l2cap_conn_rsp rsp;
4732 char buf[128];
4733 rsp.scid = cpu_to_le16(chan->dcid);
4734 rsp.dcid = cpu_to_le16(chan->scid);
4735
Mat Martineau8eb200b2012-10-23 15:24:17 -07004736 if (result == L2CAP_CR_SUCCESS) {
4737 /* Send successful response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004738 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4739 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004740 } else {
4741 /* Send negative response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004742 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4743 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004744 }
4745
4746 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4747 sizeof(rsp), &rsp);
4748
4749 if (result == L2CAP_CR_SUCCESS) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02004750 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004751 set_bit(CONF_REQ_SENT, &chan->conf_state);
4752 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4753 L2CAP_CONF_REQ,
4754 l2cap_build_conf_req(chan, buf), buf);
4755 chan->num_conf_req++;
4756 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004757 }
4758}
4759
4760static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4761 u8 remote_amp_id)
4762{
4763 l2cap_move_setup(chan);
4764 chan->move_id = local_amp_id;
4765 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4766
4767 l2cap_send_move_chan_req(chan, remote_amp_id);
4768}
4769
4770static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4771{
4772 struct hci_chan *hchan = NULL;
4773
4774 /* Placeholder - get hci_chan for logical link */
4775
4776 if (hchan) {
4777 if (hchan->state == BT_CONNECTED) {
4778 /* Logical link is ready to go */
4779 chan->hs_hcon = hchan->conn;
4780 chan->hs_hcon->l2cap_data = chan->conn;
4781 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4782 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4783
4784 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4785 } else {
4786 /* Wait for logical link to be ready */
4787 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4788 }
4789 } else {
4790 /* Logical link not available */
4791 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4792 }
4793}
4794
4795static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4796{
4797 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4798 u8 rsp_result;
4799 if (result == -EINVAL)
4800 rsp_result = L2CAP_MR_BAD_ID;
4801 else
4802 rsp_result = L2CAP_MR_NOT_ALLOWED;
4803
4804 l2cap_send_move_chan_rsp(chan, rsp_result);
4805 }
4806
4807 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4808 chan->move_state = L2CAP_MOVE_STABLE;
4809
4810 /* Restart data transmission */
4811 l2cap_ertm_send(chan);
4812}
4813
Andrei Emeltchenkoa514b172012-11-14 17:39:30 +02004814/* Invoke with locked chan */
4815void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
Mat Martineau8eb200b2012-10-23 15:24:17 -07004816{
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004817 u8 local_amp_id = chan->local_amp_id;
Andrei Emeltchenkofffadc02012-11-01 15:37:03 +02004818 u8 remote_amp_id = chan->remote_amp_id;
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004819
Mat Martineau8eb200b2012-10-23 15:24:17 -07004820 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4821 chan, result, local_amp_id, remote_amp_id);
4822
Mat Martineau8eb200b2012-10-23 15:24:17 -07004823 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4824 l2cap_chan_unlock(chan);
4825 return;
4826 }
4827
4828 if (chan->state != BT_CONNECTED) {
4829 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4830 } else if (result != L2CAP_MR_SUCCESS) {
4831 l2cap_do_move_cancel(chan, result);
4832 } else {
4833 switch (chan->move_role) {
4834 case L2CAP_MOVE_ROLE_INITIATOR:
4835 l2cap_do_move_initiate(chan, local_amp_id,
4836 remote_amp_id);
4837 break;
4838 case L2CAP_MOVE_ROLE_RESPONDER:
4839 l2cap_do_move_respond(chan, result);
4840 break;
4841 default:
4842 l2cap_do_move_cancel(chan, result);
4843 break;
4844 }
4845 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004846}
4847
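/* Handle an incoming Move Channel Request: validate the channel, the
 * destination controller and possible move collisions, then either
 * move straight towards BR/EDR or wait for the AMP physical link.
 */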
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004848static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004849 struct l2cap_cmd_hdr *cmd,
4850 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004851{
4852 struct l2cap_move_chan_req *req = data;
Mat Martineau1500109b2012-10-23 15:24:15 -07004853 struct l2cap_move_chan_rsp rsp;
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004854 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004855 u16 icid = 0;
4856 u16 result = L2CAP_MR_NOT_ALLOWED;
4857
4858 if (cmd_len != sizeof(*req))
4859 return -EPROTO;
4860
4861 icid = le16_to_cpu(req->icid);
4862
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004863 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004864
Marcel Holtmann848566b2013-10-01 22:59:22 -07004865 if (!conn->hs_enabled)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004866 return -EINVAL;
4867
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004868 chan = l2cap_get_chan_by_dcid(conn, icid);
4869 if (!chan) {
Mat Martineau1500109b2012-10-23 15:24:15 -07004870 rsp.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004871 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
Mat Martineau1500109b2012-10-23 15:24:15 -07004872 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4873 sizeof(rsp), &rsp);
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004874 return 0;
4875 }
4876
Mat Martineau1500109b2012-10-23 15:24:15 -07004877 chan->ident = cmd->ident;
4878
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004879 if (chan->scid < L2CAP_CID_DYN_START ||
4880 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4881 (chan->mode != L2CAP_MODE_ERTM &&
4882 chan->mode != L2CAP_MODE_STREAMING)) {
4883 result = L2CAP_MR_NOT_ALLOWED;
4884 goto send_move_response;
4885 }
4886
4887 if (chan->local_amp_id == req->dest_amp_id) {
4888 result = L2CAP_MR_SAME_ID;
4889 goto send_move_response;
4890 }
4891
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004892 if (req->dest_amp_id != AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004893 struct hci_dev *hdev;
4894 hdev = hci_dev_get(req->dest_amp_id);
4895 if (!hdev || hdev->dev_type != HCI_AMP ||
4896 !test_bit(HCI_UP, &hdev->flags)) {
4897 if (hdev)
4898 hci_dev_put(hdev);
4899
4900 result = L2CAP_MR_BAD_ID;
4901 goto send_move_response;
4902 }
4903 hci_dev_put(hdev);
4904 }
4905
4906	/* Detect a move collision. The request from the side with the
4907	 * larger bd_addr takes precedence: if the local address is the
4908	 * larger one, refuse the remote's request with a collision result,
4909	 * otherwise yield the initiator role and handle the remote's move. */
4910 if ((__chan_is_moving(chan) ||
4911 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
Marcel Holtmann6f59b902013-10-13 05:24:01 -07004912 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004913 result = L2CAP_MR_COLLISION;
4914 goto send_move_response;
4915 }
4916
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004917 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4918 l2cap_move_setup(chan);
4919 chan->move_id = req->dest_amp_id;
4920 icid = chan->dcid;
4921
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004922 if (req->dest_amp_id == AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004923 /* Moving to BR/EDR */
4924 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4925 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4926 result = L2CAP_MR_PEND;
4927 } else {
4928 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4929 result = L2CAP_MR_SUCCESS;
4930 }
4931 } else {
4932 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4933 /* Placeholder - uncomment when amp functions are available */
4934 /*amp_accept_physical(chan, req->dest_amp_id);*/
4935 result = L2CAP_MR_PEND;
4936 }
4937
4938send_move_response:
Mat Martineau1500109b2012-10-23 15:24:15 -07004939 l2cap_send_move_chan_rsp(chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004940
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004941 l2cap_chan_unlock(chan);
4942
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004943 return 0;
4944}
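
/* To summarise the checks above: a Move Channel Request is refused with
 * L2CAP_MR_NOT_ALLOWED unless it names a dynamically allocated CID on an
 * ERTM or streaming channel that is not pinned to BR/EDR by policy, with
 * L2CAP_MR_SAME_ID if the channel already lives on the requested
 * controller, and with L2CAP_MR_BAD_ID if that AMP controller does not
 * exist or is not up. When the move is acceptable, the response carries
 * L2CAP_MR_SUCCESS for an immediate move back to BR/EDR, or
 * L2CAP_MR_PEND while a local busy condition clears or the AMP physical
 * link is still being prepared.
 */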
4945
Mat Martineau5b155ef2012-10-23 15:24:14 -07004946static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4947{
4948 struct l2cap_chan *chan;
4949 struct hci_chan *hchan = NULL;
4950
4951 chan = l2cap_get_chan_by_scid(conn, icid);
4952 if (!chan) {
4953 l2cap_send_move_chan_cfm_icid(conn, icid);
4954 return;
4955 }
4956
4957 __clear_chan_timer(chan);
4958 if (result == L2CAP_MR_PEND)
4959 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4960
4961 switch (chan->move_state) {
4962 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4963 /* Move confirm will be sent when logical link
4964 * is complete.
4965 */
4966 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4967 break;
4968 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4969 if (result == L2CAP_MR_PEND) {
4970 break;
4971 } else if (test_bit(CONN_LOCAL_BUSY,
4972 &chan->conn_state)) {
4973 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4974 } else {
4975 /* Logical link is up or moving to BR/EDR,
4976 * proceed with move
4977 */
4978 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4979 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4980 }
4981 break;
4982 case L2CAP_MOVE_WAIT_RSP:
4983 /* Moving to AMP */
4984 if (result == L2CAP_MR_SUCCESS) {
4985 /* Remote is ready, send confirm immediately
4986 * after logical link is ready
4987 */
4988 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4989 } else {
4990 /* Both logical link and move success
4991 * are required to confirm
4992 */
4993 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4994 }
4995
4996 /* Placeholder - get hci_chan for logical link */
4997 if (!hchan) {
4998 /* Logical link not available */
4999 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5000 break;
5001 }
5002
5003 /* If the logical link is not yet connected, do not
5004 * send confirmation.
5005 */
5006 if (hchan->state != BT_CONNECTED)
5007 break;
5008
5009 /* Logical link is already ready to go */
5010
5011 chan->hs_hcon = hchan->conn;
5012 chan->hs_hcon->l2cap_data = chan->conn;
5013
5014 if (result == L2CAP_MR_SUCCESS) {
5015 /* Can confirm now */
5016 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5017 } else {
5018 /* Now only need move success
5019 * to confirm
5020 */
5021 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5022 }
5023
5024 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5025 break;
5026 default:
5027 /* Any other amp move state means the move failed. */
5028 chan->move_id = chan->local_amp_id;
5029 l2cap_move_done(chan);
5030 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5031 }
5032
5033 l2cap_chan_unlock(chan);
5034}
5035
5036static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5037 u16 result)
5038{
5039 struct l2cap_chan *chan;
5040
5041 chan = l2cap_get_chan_by_ident(conn, ident);
5042 if (!chan) {
5043 /* Could not locate channel, icid is best guess */
5044 l2cap_send_move_chan_cfm_icid(conn, icid);
5045 return;
5046 }
5047
5048 __clear_chan_timer(chan);
5049
5050 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5051 if (result == L2CAP_MR_COLLISION) {
5052 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5053 } else {
5054 /* Cleanup - cancel move */
5055 chan->move_id = chan->local_amp_id;
5056 l2cap_move_done(chan);
5057 }
5058 }
5059
5060 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5061
5062 l2cap_chan_unlock(chan);
5063}
5064
5065static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5066 struct l2cap_cmd_hdr *cmd,
5067 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005068{
5069 struct l2cap_move_chan_rsp *rsp = data;
5070 u16 icid, result;
5071
5072 if (cmd_len != sizeof(*rsp))
5073 return -EPROTO;
5074
5075 icid = le16_to_cpu(rsp->icid);
5076 result = le16_to_cpu(rsp->result);
5077
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005078 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005079
Mat Martineau5b155ef2012-10-23 15:24:14 -07005080 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5081 l2cap_move_continue(conn, icid, result);
5082 else
5083 l2cap_move_fail(conn, cmd->ident, icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005084
5085 return 0;
5086}
5087
Mat Martineau5f3847a2012-10-23 15:24:12 -07005088static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5089 struct l2cap_cmd_hdr *cmd,
5090 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005091{
5092 struct l2cap_move_chan_cfm *cfm = data;
Mat Martineau5f3847a2012-10-23 15:24:12 -07005093 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005094 u16 icid, result;
5095
5096 if (cmd_len != sizeof(*cfm))
5097 return -EPROTO;
5098
5099 icid = le16_to_cpu(cfm->icid);
5100 result = le16_to_cpu(cfm->result);
5101
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005102 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005103
Mat Martineau5f3847a2012-10-23 15:24:12 -07005104 chan = l2cap_get_chan_by_dcid(conn, icid);
5105 if (!chan) {
5106 /* Spec requires a response even if the icid was not found */
5107 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5108 return 0;
5109 }
5110
5111 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5112 if (result == L2CAP_MC_CONFIRMED) {
5113 chan->local_amp_id = chan->move_id;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005114 if (chan->local_amp_id == AMP_ID_BREDR)
Mat Martineau5f3847a2012-10-23 15:24:12 -07005115 __release_logical_link(chan);
5116 } else {
5117 chan->move_id = chan->local_amp_id;
5118 }
5119
5120 l2cap_move_done(chan);
5121 }
5122
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005123 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5124
Mat Martineau5f3847a2012-10-23 15:24:12 -07005125 l2cap_chan_unlock(chan);
5126
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005127 return 0;
5128}
5129
5130static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005131 struct l2cap_cmd_hdr *cmd,
5132 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005133{
5134 struct l2cap_move_chan_cfm_rsp *rsp = data;
Mat Martineau3fd71a02012-10-23 15:24:16 -07005135 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005136 u16 icid;
5137
5138 if (cmd_len != sizeof(*rsp))
5139 return -EPROTO;
5140
5141 icid = le16_to_cpu(rsp->icid);
5142
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005143 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005144
Mat Martineau3fd71a02012-10-23 15:24:16 -07005145 chan = l2cap_get_chan_by_scid(conn, icid);
5146 if (!chan)
5147 return 0;
5148
5149 __clear_chan_timer(chan);
5150
5151 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5152 chan->local_amp_id = chan->move_id;
5153
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005154 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
Mat Martineau3fd71a02012-10-23 15:24:16 -07005155 __release_logical_link(chan);
5156
5157 l2cap_move_done(chan);
5158 }
5159
5160 l2cap_chan_unlock(chan);
5161
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005162 return 0;
5163}
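
/* The four handlers above cover the whole move signaling exchange:
 * Move Channel Request from the initiator, Move Channel Response from
 * the responder (success, pending or a refusal such as collision),
 * Move Channel Confirmation from the initiator (confirmed or
 * unconfirmed), and finally the Confirmation Response. Only the
 * confirmation round trip actually commits the switch of local_amp_id
 * to the new controller; a move that ends up back on BR/EDR also
 * releases the now unused AMP logical link at that point.
 */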
5164
Claudio Takahaside731152011-02-11 19:28:55 -02005165static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005166 struct l2cap_cmd_hdr *cmd,
Johan Hedberg203e6392013-05-15 10:07:15 +03005167 u16 cmd_len, u8 *data)
Claudio Takahaside731152011-02-11 19:28:55 -02005168{
5169 struct hci_conn *hcon = conn->hcon;
5170 struct l2cap_conn_param_update_req *req;
5171 struct l2cap_conn_param_update_rsp rsp;
Johan Hedberg203e6392013-05-15 10:07:15 +03005172 u16 min, max, latency, to_multiplier;
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005173 int err;
Claudio Takahaside731152011-02-11 19:28:55 -02005174
Johan Hedberg40bef302014-07-16 11:42:27 +03005175 if (hcon->role != HCI_ROLE_MASTER)
Claudio Takahaside731152011-02-11 19:28:55 -02005176 return -EINVAL;
5177
Claudio Takahaside731152011-02-11 19:28:55 -02005178 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5179 return -EPROTO;
5180
5181 req = (struct l2cap_conn_param_update_req *) data;
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005182 min = __le16_to_cpu(req->min);
5183 max = __le16_to_cpu(req->max);
Claudio Takahaside731152011-02-11 19:28:55 -02005184 latency = __le16_to_cpu(req->latency);
5185 to_multiplier = __le16_to_cpu(req->to_multiplier);
5186
5187 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01005188 min, max, latency, to_multiplier);
Claudio Takahaside731152011-02-11 19:28:55 -02005189
5190 memset(&rsp, 0, sizeof(rsp));
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005191
Andre Guedesd4905f22014-06-25 21:52:52 -03005192 err = hci_check_conn_params(min, max, latency, to_multiplier);
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005193 if (err)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005194 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005195 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005196 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005197
5198 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005199 sizeof(rsp), &rsp);
Claudio Takahaside731152011-02-11 19:28:55 -02005200
Andre Guedesffb5a8272014-07-01 18:10:11 -03005201 if (!err) {
Johan Hedbergf4869e22014-07-02 17:37:32 +03005202 u8 store_hint;
Andre Guedesffb5a8272014-07-01 18:10:11 -03005203
Johan Hedbergf4869e22014-07-02 17:37:32 +03005204 store_hint = hci_le_conn_update(hcon, min, max, latency,
5205 to_multiplier);
5206 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5207 store_hint, min, max, latency,
5208 to_multiplier);
5209
Andre Guedesffb5a8272014-07-01 18:10:11 -03005210 }
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005211
Claudio Takahaside731152011-02-11 19:28:55 -02005212 return 0;
5213}
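
/* A note on the units in the parameter check above (these are the usual
 * LE units, nothing specific to this file): min and max are the
 * connection interval in 1.25 ms steps, latency is the number of
 * connection events the slave may skip, and to_multiplier is the
 * supervision timeout in 10 ms steps. As an illustration only, a
 * peripheral requesting min 0x0018 and max 0x0028 with latency 0 and
 * to_multiplier 0x01F4 is asking for a 30-50 ms interval with a 5 s
 * supervision timeout; hci_check_conn_params() decides whether that
 * combination is sane, and only the accepted/rejected result is sent
 * back to the peer.
 */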
5214
Johan Hedbergf1496de2013-05-13 14:15:56 +03005215static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5216 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5217 u8 *data)
5218{
5219 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5220 u16 dcid, mtu, mps, credits, result;
5221 struct l2cap_chan *chan;
5222 int err;
5223
5224 if (cmd_len < sizeof(*rsp))
5225 return -EPROTO;
5226
5227 dcid = __le16_to_cpu(rsp->dcid);
5228 mtu = __le16_to_cpu(rsp->mtu);
5229 mps = __le16_to_cpu(rsp->mps);
5230 credits = __le16_to_cpu(rsp->credits);
5231 result = __le16_to_cpu(rsp->result);
5232
5233 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5234 return -EPROTO;
5235
5236 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5237 dcid, mtu, mps, credits, result);
5238
5239 mutex_lock(&conn->chan_lock);
5240
5241 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5242 if (!chan) {
5243 err = -EBADSLT;
5244 goto unlock;
5245 }
5246
5247 err = 0;
5248
5249 l2cap_chan_lock(chan);
5250
5251 switch (result) {
5252 case L2CAP_CR_SUCCESS:
5253 chan->ident = 0;
5254 chan->dcid = dcid;
5255 chan->omtu = mtu;
5256 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005257 chan->tx_credits = credits;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005258 l2cap_chan_ready(chan);
5259 break;
5260
5261 default:
5262 l2cap_chan_del(chan, ECONNREFUSED);
5263 break;
5264 }
5265
5266 l2cap_chan_unlock(chan);
5267
5268unlock:
5269 mutex_unlock(&conn->chan_lock);
5270
5271 return err;
5272}
5273
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005274static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005275 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5276 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005277{
5278 int err = 0;
5279
5280 switch (cmd->code) {
5281 case L2CAP_COMMAND_REJ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005282 l2cap_command_rej(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005283 break;
5284
5285 case L2CAP_CONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005286 err = l2cap_connect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005287 break;
5288
5289 case L2CAP_CONN_RSP:
Mat Martineauf5a25982012-10-11 17:48:21 +03005290 case L2CAP_CREATE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005291 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005292 break;
5293
5294 case L2CAP_CONF_REQ:
5295 err = l2cap_config_req(conn, cmd, cmd_len, data);
5296 break;
5297
5298 case L2CAP_CONF_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005299 l2cap_config_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005300 break;
5301
5302 case L2CAP_DISCONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005303 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005304 break;
5305
5306 case L2CAP_DISCONN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005307 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005308 break;
5309
5310 case L2CAP_ECHO_REQ:
5311 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5312 break;
5313
5314 case L2CAP_ECHO_RSP:
5315 break;
5316
5317 case L2CAP_INFO_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005318 err = l2cap_information_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005319 break;
5320
5321 case L2CAP_INFO_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005322 l2cap_information_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005323 break;
5324
Mat Martineauf94ff6f2011-11-02 16:18:32 -07005325 case L2CAP_CREATE_CHAN_REQ:
5326 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5327 break;
5328
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005329 case L2CAP_MOVE_CHAN_REQ:
5330 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5331 break;
5332
5333 case L2CAP_MOVE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005334 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005335 break;
5336
5337 case L2CAP_MOVE_CHAN_CFM:
5338 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5339 break;
5340
5341 case L2CAP_MOVE_CHAN_CFM_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005342 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005343 break;
5344
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005345 default:
5346 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5347 err = -EINVAL;
5348 break;
5349 }
5350
5351 return err;
5352}
5353
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005354static int l2cap_le_connect_req(struct l2cap_conn *conn,
5355 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5356 u8 *data)
5357{
5358 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5359 struct l2cap_le_conn_rsp rsp;
5360 struct l2cap_chan *chan, *pchan;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005361 u16 dcid, scid, credits, mtu, mps;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005362 __le16 psm;
5363 u8 result;
5364
5365 if (cmd_len != sizeof(*req))
5366 return -EPROTO;
5367
5368 scid = __le16_to_cpu(req->scid);
5369 mtu = __le16_to_cpu(req->mtu);
5370 mps = __le16_to_cpu(req->mps);
5371 psm = req->psm;
5372 dcid = 0;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005373 credits = 0;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005374
5375 if (mtu < 23 || mps < 23)
5376 return -EPROTO;
5377
5378 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5379 scid, mtu, mps);
5380
5381 /* Check if we have socket listening on psm */
5382 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5383 &conn->hcon->dst, LE_LINK);
5384 if (!pchan) {
5385 result = L2CAP_CR_BAD_PSM;
5386 chan = NULL;
5387 goto response;
5388 }
5389
5390 mutex_lock(&conn->chan_lock);
5391 l2cap_chan_lock(pchan);
5392
5393 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5394 result = L2CAP_CR_AUTHENTICATION;
5395 chan = NULL;
5396 goto response_unlock;
5397 }
5398
5399 /* Check if we already have channel with that dcid */
5400 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5401 result = L2CAP_CR_NO_MEM;
5402 chan = NULL;
5403 goto response_unlock;
5404 }
5405
5406 chan = pchan->ops->new_connection(pchan);
5407 if (!chan) {
5408 result = L2CAP_CR_NO_MEM;
5409 goto response_unlock;
5410 }
5411
Johan Hedberg0ce43ce2013-12-05 14:55:33 +02005412 l2cap_le_flowctl_init(chan);
5413
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005414 bacpy(&chan->src, &conn->hcon->src);
5415 bacpy(&chan->dst, &conn->hcon->dst);
5416 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5417 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5418 chan->psm = psm;
5419 chan->dcid = scid;
5420 chan->omtu = mtu;
5421 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005422 chan->tx_credits = __le16_to_cpu(req->credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005423
5424 __l2cap_chan_add(conn, chan);
5425 dcid = chan->scid;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005426 credits = chan->rx_credits;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005427
5428 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5429
5430 chan->ident = cmd->ident;
5431
5432 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5433 l2cap_state_change(chan, BT_CONNECT2);
Johan Hedberg434714d2014-09-01 09:45:03 +03005434 /* The following result value is actually not defined
5435 * for LE CoC but we use it to let the function know
5436 * that it should bail out after doing its cleanup
5437 * instead of sending a response.
5438 */
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005439 result = L2CAP_CR_PEND;
5440 chan->ops->defer(chan);
5441 } else {
5442 l2cap_chan_ready(chan);
5443 result = L2CAP_CR_SUCCESS;
5444 }
5445
5446response_unlock:
5447 l2cap_chan_unlock(pchan);
5448 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03005449 l2cap_chan_put(pchan);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005450
5451 if (result == L2CAP_CR_PEND)
5452 return 0;
5453
5454response:
5455 if (chan) {
5456 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02005457 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005458 } else {
5459 rsp.mtu = 0;
5460 rsp.mps = 0;
5461 }
5462
5463 rsp.dcid = cpu_to_le16(dcid);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005464 rsp.credits = cpu_to_le16(credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005465 rsp.result = cpu_to_le16(result);
5466
5467 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5468
5469 return 0;
5470}
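
/* LE credit based flow control, which the handler above sets up, counts
 * PDUs rather than bytes: one credit lets the peer send one K-frame of
 * up to MPS payload bytes, and the first K-frame of each SDU spends two
 * of those bytes on the SDU length field. As a worked example (numbers
 * chosen only for illustration), a 1000 byte SDU on a channel with MPS
 * 230 costs DIV_ROUND_UP(1000 + 2, 230) = 5 credits. The rx_credits
 * value returned in the connect response is what allows the remote to
 * start sending before any L2CAP_LE_CREDITS packet has been exchanged.
 */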
5471
Johan Hedbergfad5fc892013-12-05 09:45:01 +02005472static inline int l2cap_le_credits(struct l2cap_conn *conn,
5473 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5474 u8 *data)
5475{
5476 struct l2cap_le_credits *pkt;
5477 struct l2cap_chan *chan;
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005478 u16 cid, credits, max_credits;
Johan Hedbergfad5fc892013-12-05 09:45:01 +02005479
5480 if (cmd_len != sizeof(*pkt))
5481 return -EPROTO;
5482
5483 pkt = (struct l2cap_le_credits *) data;
5484 cid = __le16_to_cpu(pkt->cid);
5485 credits = __le16_to_cpu(pkt->credits);
5486
5487 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5488
5489 chan = l2cap_get_chan_by_dcid(conn, cid);
5490 if (!chan)
5491 return -EBADSLT;
5492
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005493 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5494 if (credits > max_credits) {
5495 BT_ERR("LE credits overflow");
5496 l2cap_send_disconn_req(chan, ECONNRESET);
5497
5498 /* Return 0 so that we don't trigger an unnecessary
5499 * command reject packet.
5500 */
5501 return 0;
5502 }
5503
Johan Hedbergfad5fc892013-12-05 09:45:01 +02005504 chan->tx_credits += credits;
5505
5506 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5507 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5508 chan->tx_credits--;
5509 }
5510
5511 if (chan->tx_credits)
5512 chan->ops->resume(chan);
5513
5514 l2cap_chan_unlock(chan);
5515
5516 return 0;
5517}
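
/* The overflow check above exists because a channel may never have more
 * than 65535 credits outstanding. As an illustration: with tx_credits
 * currently 3, max_credits is 65535 - 3 = 65532, so a credits packet
 * granting up to 65532 is accepted, while one granting 65533 would wrap
 * the counter and the channel is torn down with ECONNRESET instead.
 * Also note that frames queued for lack of credits are flushed here, in
 * the credit handler, and resume() is only signalled when credits are
 * still left after the queue has drained.
 */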
5518
Johan Hedberg71fb4192013-12-10 10:52:48 +02005519static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5520 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5521 u8 *data)
5522{
5523 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5524 struct l2cap_chan *chan;
5525
5526 if (cmd_len < sizeof(*rej))
5527 return -EPROTO;
5528
5529 mutex_lock(&conn->chan_lock);
5530
5531 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5532 if (!chan)
5533 goto done;
5534
5535 l2cap_chan_lock(chan);
5536 l2cap_chan_del(chan, ECONNREFUSED);
5537 l2cap_chan_unlock(chan);
5538
5539done:
5540 mutex_unlock(&conn->chan_lock);
5541 return 0;
5542}
5543
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005544static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
Johan Hedberg203e6392013-05-15 10:07:15 +03005545 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5546 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005547{
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005548 int err = 0;
5549
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005550 switch (cmd->code) {
5551 case L2CAP_COMMAND_REJ:
Johan Hedberg71fb4192013-12-10 10:52:48 +02005552 l2cap_le_command_rej(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005553 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005554
5555 case L2CAP_CONN_PARAM_UPDATE_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005556 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5557 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005558
5559 case L2CAP_CONN_PARAM_UPDATE_RSP:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005560 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005561
Johan Hedbergf1496de2013-05-13 14:15:56 +03005562 case L2CAP_LE_CONN_RSP:
5563 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005564 break;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005565
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005566 case L2CAP_LE_CONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005567 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5568 break;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005569
Johan Hedbergfad5fc892013-12-05 09:45:01 +02005570 case L2CAP_LE_CREDITS:
5571 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5572 break;
5573
Johan Hedberg3defe012013-05-15 10:16:06 +03005574 case L2CAP_DISCONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005575 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5576 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005577
5578 case L2CAP_DISCONN_RSP:
5579 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005580 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005581
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005582 default:
5583 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005584 err = -EINVAL;
5585 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005586 }
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005587
5588 return err;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005589}
5590
Johan Hedbergc5623552013-04-29 19:35:33 +03005591static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5592 struct sk_buff *skb)
5593{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005594 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005595 struct l2cap_cmd_hdr *cmd;
5596 u16 len;
Johan Hedbergc5623552013-04-29 19:35:33 +03005597 int err;
5598
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005599 if (hcon->type != LE_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005600 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005601
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005602 if (skb->len < L2CAP_CMD_HDR_SIZE)
5603 goto drop;
Johan Hedbergc5623552013-04-29 19:35:33 +03005604
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005605 cmd = (void *) skb->data;
5606 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
Johan Hedbergc5623552013-04-29 19:35:33 +03005607
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005608 len = le16_to_cpu(cmd->len);
Johan Hedbergc5623552013-04-29 19:35:33 +03005609
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005610 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
Johan Hedbergc5623552013-04-29 19:35:33 +03005611
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005612 if (len != skb->len || !cmd->ident) {
5613 BT_DBG("corrupted command");
5614 goto drop;
5615 }
Johan Hedbergc5623552013-04-29 19:35:33 +03005616
Johan Hedberg203e6392013-05-15 10:07:15 +03005617 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005618 if (err) {
5619 struct l2cap_cmd_rej_unk rej;
Johan Hedbergc5623552013-04-29 19:35:33 +03005620
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005621		BT_ERR("Failed to handle LE signaling command (%d)", err);
Johan Hedbergc5623552013-04-29 19:35:33 +03005622
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005623 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005624 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5625 sizeof(rej), &rej);
Johan Hedbergc5623552013-04-29 19:35:33 +03005626 }
5627
Marcel Holtmann3b166292013-10-02 08:28:21 -07005628drop:
Johan Hedbergc5623552013-04-29 19:35:33 +03005629 kfree_skb(skb);
5630}
5631
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005632static inline void l2cap_sig_channel(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005633 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005634{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005635 struct hci_conn *hcon = conn->hcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005636 u8 *data = skb->data;
5637 int len = skb->len;
5638 struct l2cap_cmd_hdr cmd;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005639 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005640
5641 l2cap_raw_recv(conn, skb);
5642
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005643 if (hcon->type != ACL_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005644 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005645
Linus Torvalds1da177e2005-04-16 15:20:36 -07005646 while (len >= L2CAP_CMD_HDR_SIZE) {
Al Viro88219a02007-07-29 00:17:25 -07005647 u16 cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005648 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5649 data += L2CAP_CMD_HDR_SIZE;
5650 len -= L2CAP_CMD_HDR_SIZE;
5651
Al Viro88219a02007-07-29 00:17:25 -07005652 cmd_len = le16_to_cpu(cmd.len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005653
Gustavo Padovan2d792812012-10-06 10:07:01 +01005654 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5655 cmd.ident);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005656
Al Viro88219a02007-07-29 00:17:25 -07005657 if (cmd_len > len || !cmd.ident) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005658 BT_DBG("corrupted command");
5659 break;
5660 }
5661
Johan Hedbergc5623552013-04-29 19:35:33 +03005662 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005663 if (err) {
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03005664 struct l2cap_cmd_rej_unk rej;
Gustavo F. Padovan2c6d1a22011-03-23 14:38:32 -03005665
5666			BT_ERR("Failed to handle signaling command (%d)", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005667
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005668 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Gustavo Padovan2d792812012-10-06 10:07:01 +01005669 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5670 sizeof(rej), &rej);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671 }
5672
Al Viro88219a02007-07-29 00:17:25 -07005673 data += cmd_len;
5674 len -= cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005675 }
5676
Marcel Holtmann3b166292013-10-02 08:28:21 -07005677drop:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005678 kfree_skb(skb);
5679}
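
/* Unlike the LE signaling channel handled above, where each C-frame
 * carries exactly one command, the BR/EDR signaling channel may pack
 * several commands into a single frame, hence the while loop here. In
 * both cases a handler failure is reported to the peer with a Command
 * Reject (reason "not understood") rather than by silently dropping the
 * frame.
 */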
5680
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005681static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005682{
5683 u16 our_fcs, rcv_fcs;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03005684 int hdr_size;
5685
5686 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5687 hdr_size = L2CAP_EXT_HDR_SIZE;
5688 else
5689 hdr_size = L2CAP_ENH_HDR_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005690
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005691 if (chan->fcs == L2CAP_FCS_CRC16) {
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005692 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005693 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5694 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5695
5696 if (our_fcs != rcv_fcs)
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005697 return -EBADMSG;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005698 }
5699 return 0;
5700}
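
/* The FCS trailer checked above is a plain CRC-16 over everything from
 * the Basic L2CAP header through the information payload, stored little
 * endian as the last two octets of the frame. Below is a minimal sketch
 * of the transmit-side counterpart; it is kept out of the build and only
 * illustrates the crc16()/put_unaligned_le16() pairing that the real
 * senders in this file perform inline when building S- and I-frames.
 */
#if 0
/* Hypothetical helper, for illustration only; not part of the stack. */
static void sketch_append_fcs(struct sk_buff *skb)
{
	/* skb already holds header + control + payload; append 16-bit FCS */
	u16 fcs = crc16(0, skb->data, skb->len);

	put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
}
#endif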
5701
Mat Martineau6ea00482012-05-17 20:53:52 -07005702static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005703{
Mat Martineaue31f7632012-05-17 20:53:41 -07005704 struct l2cap_ctrl control;
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005705
Mat Martineaue31f7632012-05-17 20:53:41 -07005706 BT_DBG("chan %p", chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005707
Mat Martineaue31f7632012-05-17 20:53:41 -07005708 memset(&control, 0, sizeof(control));
5709 control.sframe = 1;
5710 control.final = 1;
5711 control.reqseq = chan->buffer_seq;
5712 set_bit(CONN_SEND_FBIT, &chan->conn_state);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005713
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005714 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
Mat Martineaue31f7632012-05-17 20:53:41 -07005715 control.super = L2CAP_SUPER_RNR;
5716 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005717 }
5718
Mat Martineaue31f7632012-05-17 20:53:41 -07005719 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5720 chan->unacked_frames > 0)
5721 __set_retrans_timer(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005722
Mat Martineaue31f7632012-05-17 20:53:41 -07005723 /* Send pending iframes */
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03005724 l2cap_ertm_send(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005725
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005726 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
Mat Martineaue31f7632012-05-17 20:53:41 -07005727 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5728 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5729 * send it now.
5730 */
5731 control.super = L2CAP_SUPER_RR;
5732 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005733 }
5734}
5735
Gustavo Padovan2d792812012-10-06 10:07:01 +01005736static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5737 struct sk_buff **last_frag)
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005738{
Mat Martineau84084a32011-07-22 14:54:00 -07005739 /* skb->len reflects data in skb as well as all fragments
5740 * skb->data_len reflects only data in fragments
5741 */
5742 if (!skb_has_frag_list(skb))
5743 skb_shinfo(skb)->frag_list = new_frag;
5744
5745 new_frag->next = NULL;
5746
5747 (*last_frag)->next = new_frag;
5748 *last_frag = new_frag;
5749
5750 skb->len += new_frag->len;
5751 skb->data_len += new_frag->len;
5752 skb->truesize += new_frag->truesize;
5753}
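
/* The helper above grows an SDU under reassembly without copying: each
 * new fragment is chained on the head skb's frag_list, and the three
 * counters are kept consistent by hand: skb->len covers the head plus
 * all fragments, skb->data_len only the fragmented part, and truesize
 * feeds the socket memory accounting. Callers must always pass back the
 * same last_frag pointer they received from the previous call.
 */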
5754
Mat Martineau4b51dae92012-05-17 20:53:37 -07005755static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5756 struct l2cap_ctrl *control)
Mat Martineau84084a32011-07-22 14:54:00 -07005757{
5758 int err = -EINVAL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005759
Mat Martineau4b51dae92012-05-17 20:53:37 -07005760 switch (control->sar) {
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005761 case L2CAP_SAR_UNSEGMENTED:
Mat Martineau84084a32011-07-22 14:54:00 -07005762 if (chan->sdu)
5763 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005764
Gustavo Padovan80b98022012-05-27 22:27:51 -03005765 err = chan->ops->recv(chan, skb);
Mat Martineau84084a32011-07-22 14:54:00 -07005766 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005767
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005768 case L2CAP_SAR_START:
Mat Martineau84084a32011-07-22 14:54:00 -07005769 if (chan->sdu)
5770 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005771
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005772 chan->sdu_len = get_unaligned_le16(skb->data);
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005773 skb_pull(skb, L2CAP_SDULEN_SIZE);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005774
Mat Martineau84084a32011-07-22 14:54:00 -07005775 if (chan->sdu_len > chan->imtu) {
5776 err = -EMSGSIZE;
5777 break;
5778 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005779
Mat Martineau84084a32011-07-22 14:54:00 -07005780 if (skb->len >= chan->sdu_len)
5781 break;
5782
5783 chan->sdu = skb;
5784 chan->sdu_last_frag = skb;
5785
5786 skb = NULL;
5787 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005788 break;
5789
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005790 case L2CAP_SAR_CONTINUE:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005791 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005792 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005793
Mat Martineau84084a32011-07-22 14:54:00 -07005794 append_skb_frag(chan->sdu, skb,
5795 &chan->sdu_last_frag);
5796 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005797
Mat Martineau84084a32011-07-22 14:54:00 -07005798 if (chan->sdu->len >= chan->sdu_len)
5799 break;
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005800
Mat Martineau84084a32011-07-22 14:54:00 -07005801 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005802 break;
5803
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005804 case L2CAP_SAR_END:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005805 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005806 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005807
Mat Martineau84084a32011-07-22 14:54:00 -07005808 append_skb_frag(chan->sdu, skb,
5809 &chan->sdu_last_frag);
5810 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005811
Mat Martineau84084a32011-07-22 14:54:00 -07005812 if (chan->sdu->len != chan->sdu_len)
5813 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005814
Gustavo Padovan80b98022012-05-27 22:27:51 -03005815 err = chan->ops->recv(chan, chan->sdu);
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005816
Mat Martineau84084a32011-07-22 14:54:00 -07005817 if (!err) {
5818 /* Reassembly complete */
5819 chan->sdu = NULL;
5820 chan->sdu_last_frag = NULL;
5821 chan->sdu_len = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005822 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005823 break;
5824 }
5825
Mat Martineau84084a32011-07-22 14:54:00 -07005826 if (err) {
5827 kfree_skb(skb);
5828 kfree_skb(chan->sdu);
5829 chan->sdu = NULL;
5830 chan->sdu_last_frag = NULL;
5831 chan->sdu_len = 0;
5832 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005833
Mat Martineau84084a32011-07-22 14:54:00 -07005834 return err;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005835}
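
/* Reassembly above is driven purely by the SAR bits of each I-frame: an
 * UNSEGMENTED frame is delivered as-is, a START frame records the 2-byte
 * SDU length and parks the skb, CONTINUE frames are appended via
 * append_skb_frag(), and the END frame delivers the SDU only if the
 * accumulated length matches sdu_len exactly. Every other combination
 * (START while an SDU is open, CONTINUE or END with none open, an
 * oversized or short SDU) leaves err set, and the common error path
 * below the switch frees both the frame and any partial SDU so the
 * channel state stays clean for the next sequence.
 */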
5836
Mat Martineau32b32732012-10-23 15:24:11 -07005837static int l2cap_resegment(struct l2cap_chan *chan)
5838{
5839 /* Placeholder */
5840 return 0;
5841}
5842
Mat Martineaue3281402011-07-07 09:39:02 -07005843void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
Gustavo F. Padovan712132e2010-06-21 19:39:50 -03005844{
Mat Martineau61aa4f52012-05-17 20:53:40 -07005845 u8 event;
5846
5847 if (chan->mode != L2CAP_MODE_ERTM)
5848 return;
5849
5850 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03005851 l2cap_tx(chan, NULL, NULL, event);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005852}
5853
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005854static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5855{
Mat Martineau63838722012-05-17 20:53:45 -07005856 int err = 0;
5857 /* Pass sequential frames to l2cap_reassemble_sdu()
5858 * until a gap is encountered.
5859 */
5860
5861 BT_DBG("chan %p", chan);
5862
5863 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5864 struct sk_buff *skb;
5865 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5866 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5867
5868 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5869
5870 if (!skb)
5871 break;
5872
5873 skb_unlink(skb, &chan->srej_q);
5874 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5875 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5876 if (err)
5877 break;
5878 }
5879
5880 if (skb_queue_empty(&chan->srej_q)) {
5881 chan->rx_state = L2CAP_RX_STATE_RECV;
5882 l2cap_send_ack(chan);
5883 }
5884
5885 return err;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005886}
5887
5888static void l2cap_handle_srej(struct l2cap_chan *chan,
5889 struct l2cap_ctrl *control)
5890{
Mat Martineauf80842a2012-05-17 20:53:46 -07005891 struct sk_buff *skb;
5892
5893 BT_DBG("chan %p, control %p", chan, control);
5894
5895 if (control->reqseq == chan->next_tx_seq) {
5896 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005897 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07005898 return;
5899 }
5900
5901 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5902
5903 if (skb == NULL) {
5904 BT_DBG("Seq %d not available for retransmission",
5905 control->reqseq);
5906 return;
5907 }
5908
5909 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5910 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005911 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07005912 return;
5913 }
5914
5915 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5916
5917 if (control->poll) {
5918 l2cap_pass_to_tx(chan, control);
5919
5920 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5921 l2cap_retransmit(chan, control);
5922 l2cap_ertm_send(chan);
5923
5924 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5925 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5926 chan->srej_save_reqseq = control->reqseq;
5927 }
5928 } else {
5929 l2cap_pass_to_tx_fbit(chan, control);
5930
5931 if (control->final) {
5932 if (chan->srej_save_reqseq != control->reqseq ||
5933 !test_and_clear_bit(CONN_SREJ_ACT,
5934 &chan->conn_state))
5935 l2cap_retransmit(chan, control);
5936 } else {
5937 l2cap_retransmit(chan, control);
5938 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5939 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5940 chan->srej_save_reqseq = control->reqseq;
5941 }
5942 }
5943 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005944}
5945
5946static void l2cap_handle_rej(struct l2cap_chan *chan,
5947 struct l2cap_ctrl *control)
5948{
Mat Martineaufcd289d2012-05-17 20:53:47 -07005949 struct sk_buff *skb;
5950
5951 BT_DBG("chan %p, control %p", chan, control);
5952
5953 if (control->reqseq == chan->next_tx_seq) {
5954 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005955 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07005956 return;
5957 }
5958
5959 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5960
5961 if (chan->max_tx && skb &&
5962 bt_cb(skb)->control.retries >= chan->max_tx) {
5963 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005964 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07005965 return;
5966 }
5967
5968 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5969
5970 l2cap_pass_to_tx(chan, control);
5971
5972 if (control->final) {
5973 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5974 l2cap_retransmit_all(chan, control);
5975 } else {
5976 l2cap_retransmit_all(chan, control);
5977 l2cap_ertm_send(chan);
5978 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5979 set_bit(CONN_REJ_ACT, &chan->conn_state);
5980 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005981}
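
/* The two handlers above implement the ERTM retransmission requests: an
 * SREJ is selective repeat (only the frame named in reqseq is sent
 * again), while a REJ is go-back-N (everything from reqseq onwards is
 * retransmitted). The CONN_SREJ_ACT / CONN_REJ_ACT bits remember that a
 * retransmission was already triggered by a poll, so the matching
 * final-bit response arriving later does not cause the same frames to be
 * sent a second time.
 */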
5982
Mat Martineau4b51dae92012-05-17 20:53:37 -07005983static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5984{
5985 BT_DBG("chan %p, txseq %d", chan, txseq);
5986
5987 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5988 chan->expected_tx_seq);
5989
5990 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5991 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
Gustavo Padovan2d792812012-10-06 10:07:01 +01005992 chan->tx_win) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07005993 /* See notes below regarding "double poll" and
5994 * invalid packets.
5995 */
5996 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5997 BT_DBG("Invalid/Ignore - after SREJ");
5998 return L2CAP_TXSEQ_INVALID_IGNORE;
5999 } else {
6000 BT_DBG("Invalid - in window after SREJ sent");
6001 return L2CAP_TXSEQ_INVALID;
6002 }
6003 }
6004
6005 if (chan->srej_list.head == txseq) {
6006 BT_DBG("Expected SREJ");
6007 return L2CAP_TXSEQ_EXPECTED_SREJ;
6008 }
6009
6010 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6011 BT_DBG("Duplicate SREJ - txseq already stored");
6012 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6013 }
6014
6015 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6016 BT_DBG("Unexpected SREJ - not requested");
6017 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6018 }
6019 }
6020
6021 if (chan->expected_tx_seq == txseq) {
6022 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6023 chan->tx_win) {
6024 BT_DBG("Invalid - txseq outside tx window");
6025 return L2CAP_TXSEQ_INVALID;
6026 } else {
6027 BT_DBG("Expected");
6028 return L2CAP_TXSEQ_EXPECTED;
6029 }
6030 }
6031
6032 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
Gustavo Padovan2d792812012-10-06 10:07:01 +01006033 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07006034 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6035 return L2CAP_TXSEQ_DUPLICATE;
6036 }
6037
6038 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6039 /* A source of invalid packets is a "double poll" condition,
6040 * where delays cause us to send multiple poll packets. If
6041 * the remote stack receives and processes both polls,
6042 * sequence numbers can wrap around in such a way that a
6043 * resent frame has a sequence number that looks like new data
6044 * with a sequence gap. This would trigger an erroneous SREJ
6045 * request.
6046 *
6047 * Fortunately, this is impossible with a tx window that's
6048 * less than half of the maximum sequence number, which allows
6049 * invalid frames to be safely ignored.
6050 *
6051 * With tx window sizes greater than half of the tx window
6052 * maximum, the frame is invalid and cannot be ignored. This
6053 * causes a disconnect.
6054 */
6055
6056 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6057 BT_DBG("Invalid/Ignore - txseq outside tx window");
6058 return L2CAP_TXSEQ_INVALID_IGNORE;
6059 } else {
6060 BT_DBG("Invalid - txseq outside tx window");
6061 return L2CAP_TXSEQ_INVALID;
6062 }
6063 } else {
6064 BT_DBG("Unexpected - txseq indicates missing frames");
6065 return L2CAP_TXSEQ_UNEXPECTED;
6066 }
6067}
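
/* A worked example of the window arithmetic above, using the 6-bit
 * sequence space of the enhanced control field (tx_win_max = 63, so all
 * offsets are taken modulo 64), an illustrative tx_win of 10, and no
 * SREJ outstanding: with last_acked_seq = 60 and expected_tx_seq = 62,
 * an incoming txseq of 62 is EXPECTED; txseq 61 has offset
 * (61 - 60) mod 64 = 1, smaller than the expected offset of 2, so it is
 * a DUPLICATE; txseq 4 (offset 8, inside the 10-frame window) is
 * UNEXPECTED and triggers the SREJ path; and txseq 7 (offset 11, outside
 * the window) is INVALID_IGNORE, because tx_win is no larger than half
 * the sequence space the frame is silently dropped instead of tearing
 * the connection down.
 */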
6068
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006069static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6070 struct l2cap_ctrl *control,
6071 struct sk_buff *skb, u8 event)
6072{
6073 int err = 0;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006074 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006075
6076 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6077 event);
6078
6079 switch (event) {
6080 case L2CAP_EV_RECV_IFRAME:
6081 switch (l2cap_classify_txseq(chan, control->txseq)) {
6082 case L2CAP_TXSEQ_EXPECTED:
6083 l2cap_pass_to_tx(chan, control);
6084
6085 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6086 BT_DBG("Busy, discarding expected seq %d",
6087 control->txseq);
6088 break;
6089 }
6090
6091 chan->expected_tx_seq = __next_seq(chan,
6092 control->txseq);
6093
6094 chan->buffer_seq = chan->expected_tx_seq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006095 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006096
6097 err = l2cap_reassemble_sdu(chan, skb, control);
6098 if (err)
6099 break;
6100
6101 if (control->final) {
6102 if (!test_and_clear_bit(CONN_REJ_ACT,
6103 &chan->conn_state)) {
6104 control->final = 0;
6105 l2cap_retransmit_all(chan, control);
6106 l2cap_ertm_send(chan);
6107 }
6108 }
6109
6110 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6111 l2cap_send_ack(chan);
6112 break;
6113 case L2CAP_TXSEQ_UNEXPECTED:
6114 l2cap_pass_to_tx(chan, control);
6115
6116 /* Can't issue SREJ frames in the local busy state.
6117 * Drop this frame, it will be seen as missing
6118 * when local busy is exited.
6119 */
6120 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6121 BT_DBG("Busy, discarding unexpected seq %d",
6122 control->txseq);
6123 break;
6124 }
6125
6126 /* There was a gap in the sequence, so an SREJ
6127 * must be sent for each missing frame. The
6128 * current frame is stored for later use.
6129 */
6130 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006131 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006132 BT_DBG("Queued %p (queue len %d)", skb,
6133 skb_queue_len(&chan->srej_q));
6134
6135 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6136 l2cap_seq_list_clear(&chan->srej_list);
6137 l2cap_send_srej(chan, control->txseq);
6138
6139 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6140 break;
6141 case L2CAP_TXSEQ_DUPLICATE:
6142 l2cap_pass_to_tx(chan, control);
6143 break;
6144 case L2CAP_TXSEQ_INVALID_IGNORE:
6145 break;
6146 case L2CAP_TXSEQ_INVALID:
6147 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006148 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006149 break;
6150 }
6151 break;
6152 case L2CAP_EV_RECV_RR:
6153 l2cap_pass_to_tx(chan, control);
6154 if (control->final) {
6155 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6156
Mat Martineaue6a3ee62012-10-23 15:24:22 -07006157 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6158 !__chan_is_moving(chan)) {
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006159 control->final = 0;
6160 l2cap_retransmit_all(chan, control);
6161 }
6162
6163 l2cap_ertm_send(chan);
6164 } else if (control->poll) {
6165 l2cap_send_i_or_rr_or_rnr(chan);
6166 } else {
6167 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6168 &chan->conn_state) &&
6169 chan->unacked_frames)
6170 __set_retrans_timer(chan);
6171
6172 l2cap_ertm_send(chan);
6173 }
6174 break;
6175 case L2CAP_EV_RECV_RNR:
6176 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6177 l2cap_pass_to_tx(chan, control);
6178 if (control && control->poll) {
6179 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6180 l2cap_send_rr_or_rnr(chan, 0);
6181 }
6182 __clear_retrans_timer(chan);
6183 l2cap_seq_list_clear(&chan->retrans_list);
6184 break;
6185 case L2CAP_EV_RECV_REJ:
6186 l2cap_handle_rej(chan, control);
6187 break;
6188 case L2CAP_EV_RECV_SREJ:
6189 l2cap_handle_srej(chan, control);
6190 break;
6191 default:
6192 break;
6193 }
6194
6195 if (skb && !skb_in_use) {
6196 BT_DBG("Freeing %p", skb);
6197 kfree_skb(skb);
6198 }
6199
6200 return err;
6201}
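
/* In the RECV state handled above the receiver behaves as follows: an
 * expected I-frame is reassembled and acknowledged (unless local busy is
 * set, in which case it is dropped and recovered later), an unexpected
 * I-frame is parked in srej_q and answered with SREJs before moving to
 * the SREJ_SENT state, duplicates only update the transmit side, RR and
 * RNR feed the acknowledgement into the transmit state machine and
 * manage the remote-busy flag, and REJ/SREJ are handed to the dedicated
 * retransmission handlers. Any frame classified as outright invalid
 * disconnects the channel.
 */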
6202
6203static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6204 struct l2cap_ctrl *control,
6205 struct sk_buff *skb, u8 event)
6206{
6207 int err = 0;
6208 u16 txseq = control->txseq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006209 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006210
6211 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6212 event);
6213
6214 switch (event) {
6215 case L2CAP_EV_RECV_IFRAME:
6216 switch (l2cap_classify_txseq(chan, txseq)) {
6217 case L2CAP_TXSEQ_EXPECTED:
6218 /* Keep frame for reassembly later */
6219 l2cap_pass_to_tx(chan, control);
6220 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006221 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006222 BT_DBG("Queued %p (queue len %d)", skb,
6223 skb_queue_len(&chan->srej_q));
6224
6225 chan->expected_tx_seq = __next_seq(chan, txseq);
6226 break;
6227 case L2CAP_TXSEQ_EXPECTED_SREJ:
6228 l2cap_seq_list_pop(&chan->srej_list);
6229
6230 l2cap_pass_to_tx(chan, control);
6231 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006232 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006233 BT_DBG("Queued %p (queue len %d)", skb,
6234 skb_queue_len(&chan->srej_q));
6235
6236 err = l2cap_rx_queued_iframes(chan);
6237 if (err)
6238 break;
6239
6240 break;
6241 case L2CAP_TXSEQ_UNEXPECTED:
6242 /* Got a frame that can't be reassembled yet.
6243 * Save it for later, and send SREJs to cover
6244 * the missing frames.
6245 */
6246 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006247 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006248 BT_DBG("Queued %p (queue len %d)", skb,
6249 skb_queue_len(&chan->srej_q));
6250
6251 l2cap_pass_to_tx(chan, control);
6252 l2cap_send_srej(chan, control->txseq);
6253 break;
6254 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6255 /* This frame was requested with an SREJ, but
6256 * some expected retransmitted frames are
6257 * missing. Request retransmission of missing
6258 * SREJ'd frames.
6259 */
6260 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006261 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006262 BT_DBG("Queued %p (queue len %d)", skb,
6263 skb_queue_len(&chan->srej_q));
6264
6265 l2cap_pass_to_tx(chan, control);
6266 l2cap_send_srej_list(chan, control->txseq);
6267 break;
6268 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6269 /* We've already queued this frame. Drop this copy. */
6270 l2cap_pass_to_tx(chan, control);
6271 break;
6272 case L2CAP_TXSEQ_DUPLICATE:
6273 /* Expecting a later sequence number, so this frame
6274 * was already received. Ignore it completely.
6275 */
6276 break;
6277 case L2CAP_TXSEQ_INVALID_IGNORE:
6278 break;
6279 case L2CAP_TXSEQ_INVALID:
6280 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006281 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006282 break;
6283 }
6284 break;
6285 case L2CAP_EV_RECV_RR:
6286 l2cap_pass_to_tx(chan, control);
6287 if (control->final) {
6288 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6289
6290 if (!test_and_clear_bit(CONN_REJ_ACT,
6291 &chan->conn_state)) {
6292 control->final = 0;
6293 l2cap_retransmit_all(chan, control);
6294 }
6295
6296 l2cap_ertm_send(chan);
6297 } else if (control->poll) {
6298 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6299 &chan->conn_state) &&
6300 chan->unacked_frames) {
6301 __set_retrans_timer(chan);
6302 }
6303
6304 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6305 l2cap_send_srej_tail(chan);
6306 } else {
6307 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6308 &chan->conn_state) &&
6309 chan->unacked_frames)
6310 __set_retrans_timer(chan);
6311
6312 l2cap_send_ack(chan);
6313 }
6314 break;
6315 case L2CAP_EV_RECV_RNR:
6316 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6317 l2cap_pass_to_tx(chan, control);
6318 if (control->poll) {
6319 l2cap_send_srej_tail(chan);
6320 } else {
6321 struct l2cap_ctrl rr_control;
6322 memset(&rr_control, 0, sizeof(rr_control));
6323 rr_control.sframe = 1;
6324 rr_control.super = L2CAP_SUPER_RR;
6325 rr_control.reqseq = chan->buffer_seq;
6326 l2cap_send_sframe(chan, &rr_control);
6327 }
6328
6329 break;
6330 case L2CAP_EV_RECV_REJ:
6331 l2cap_handle_rej(chan, control);
6332 break;
6333 case L2CAP_EV_RECV_SREJ:
6334 l2cap_handle_srej(chan, control);
6335 break;
6336 }
6337
6338 if (skb && !skb_in_use) {
6339 BT_DBG("Freeing %p", skb);
6340 kfree_skb(skb);
6341 }
6342
6343 return err;
6344}
6345
Mat Martineau32b32732012-10-23 15:24:11 -07006346static int l2cap_finish_move(struct l2cap_chan *chan)
6347{
6348 BT_DBG("chan %p", chan);
6349
6350 chan->rx_state = L2CAP_RX_STATE_RECV;
6351
6352 if (chan->hs_hcon)
6353 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6354 else
6355 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6356
6357 return l2cap_resegment(chan);
6358}
6359
6360static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6361 struct l2cap_ctrl *control,
6362 struct sk_buff *skb, u8 event)
6363{
6364 int err;
6365
6366 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6367 event);
6368
6369 if (!control->poll)
6370 return -EPROTO;
6371
6372 l2cap_process_reqseq(chan, control->reqseq);
6373
6374 if (!skb_queue_empty(&chan->tx_q))
6375 chan->tx_send_head = skb_peek(&chan->tx_q);
6376 else
6377 chan->tx_send_head = NULL;
6378
6379 /* Rewind next_tx_seq to the point expected
6380 * by the receiver.
6381 */
6382 chan->next_tx_seq = control->reqseq;
6383 chan->unacked_frames = 0;
6384
6385 err = l2cap_finish_move(chan);
6386 if (err)
6387 return err;
6388
6389 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6390 l2cap_send_i_or_rr_or_rnr(chan);
6391
6392 if (event == L2CAP_EV_RECV_IFRAME)
6393 return -EPROTO;
6394
6395 return l2cap_rx_state_recv(chan, control, NULL, event);
6396}
6397
6398static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6399 struct l2cap_ctrl *control,
6400 struct sk_buff *skb, u8 event)
6401{
6402 int err;
6403
6404 if (!control->final)
6405 return -EPROTO;
6406
6407 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6408
6409 chan->rx_state = L2CAP_RX_STATE_RECV;
6410 l2cap_process_reqseq(chan, control->reqseq);
6411
6412 if (!skb_queue_empty(&chan->tx_q))
6413 chan->tx_send_head = skb_peek(&chan->tx_q);
6414 else
6415 chan->tx_send_head = NULL;
6416
6417 /* Rewind next_tx_seq to the point expected
6418 * by the receiver.
6419 */
6420 chan->next_tx_seq = control->reqseq;
6421 chan->unacked_frames = 0;
6422
6423 if (chan->hs_hcon)
6424 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6425 else
6426 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6427
6428 err = l2cap_resegment(chan);
6429
6430 if (!err)
6431 err = l2cap_rx_state_recv(chan, control, skb, event);
6432
6433 return err;
6434}
6435
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006436static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6437{
6438 /* Make sure reqseq is for a packet that has been sent but not acked */
6439 u16 unacked;
6440
6441 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6442 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6443}
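
/* Example of the check above with the 6-bit sequence space (modulo 64):
 * if next_tx_seq is 5 and expected_ack_seq is 60, then unacked is
 * (5 - 60) mod 64 = 9, i.e. frames 60..63 and 0..4 are in flight. A
 * reqseq of 62 gives (5 - 62) mod 64 = 7 <= 9 and is accepted, while a
 * reqseq of 10 gives 59 > 9 and is rejected, which makes l2cap_rx()
 * below treat the peer as misbehaving and disconnect with ECONNRESET.
 */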
6444
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006445static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6446 struct sk_buff *skb, u8 event)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006447{
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006448 int err = 0;
6449
6450 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6451 control, skb, event, chan->rx_state);
6452
6453 if (__valid_reqseq(chan, control->reqseq)) {
6454 switch (chan->rx_state) {
6455 case L2CAP_RX_STATE_RECV:
6456 err = l2cap_rx_state_recv(chan, control, skb, event);
6457 break;
6458 case L2CAP_RX_STATE_SREJ_SENT:
6459 err = l2cap_rx_state_srej_sent(chan, control, skb,
6460 event);
6461 break;
Mat Martineau32b32732012-10-23 15:24:11 -07006462 case L2CAP_RX_STATE_WAIT_P:
6463 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6464 break;
6465 case L2CAP_RX_STATE_WAIT_F:
6466 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6467 break;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006468 default:
6469 /* shut it down */
6470 break;
6471 }
6472 } else {
6473		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6474 control->reqseq, chan->next_tx_seq,
6475 chan->expected_ack_seq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006476 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006477 }
6478
6479 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006480}
6481
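/* Streaming mode receive path.  Only the expected txseq is
 * reassembled into an SDU; any gap discards both the partial SDU and
 * the frame itself, since streaming mode never retransmits.  The
 * expected sequence numbers are resynchronised to the received frame
 * either way.
 */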
6482static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6483 struct sk_buff *skb)
6484{
Mat Martineau4b51dae92012-05-17 20:53:37 -07006485 int err = 0;
6486
6487 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6488 chan->rx_state);
6489
6490 if (l2cap_classify_txseq(chan, control->txseq) ==
6491 L2CAP_TXSEQ_EXPECTED) {
6492 l2cap_pass_to_tx(chan, control);
6493
6494 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6495 __next_seq(chan, chan->buffer_seq));
6496
6497 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6498
6499 l2cap_reassemble_sdu(chan, skb, control);
6500 } else {
6501 if (chan->sdu) {
6502 kfree_skb(chan->sdu);
6503 chan->sdu = NULL;
6504 }
6505 chan->sdu_last_frag = NULL;
6506 chan->sdu_len = 0;
6507
6508 if (skb) {
6509 BT_DBG("Freeing %p", skb);
6510 kfree_skb(skb);
6511 }
6512 }
6513
6514 chan->last_acked_seq = control->txseq;
6515 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6516
6517 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006518}
6519
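/* Entry point for ERTM and streaming mode data.  Unpacks the control
 * field, verifies the FCS and the MPS limit, then feeds I-frames into
 * the rx state machine (or the streaming handler) and maps S-frames
 * onto their RR/REJ/RNR/SREJ events.  Malformed frames are dropped;
 * more serious violations trigger a disconnect request.
 */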
6520static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6521{
6522 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6523 u16 len;
6524 u8 event;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006525
Mat Martineaub76bbd62012-04-11 10:48:43 -07006526 __unpack_control(chan, skb);
6527
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006528 len = skb->len;
6529
6530 /*
6531 * We can just drop the corrupted I-frame here.
6532	 * The receiver will miss it and start the proper recovery
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006533	 * procedure, asking for retransmission.
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006534 */
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006535 if (l2cap_check_fcs(chan, skb))
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006536 goto drop;
6537
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006538 if (!control->sframe && control->sar == L2CAP_SAR_START)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006539 len -= L2CAP_SDULEN_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006540
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006541 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006542 len -= L2CAP_FCS_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006543
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006544 if (len > chan->mps) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006545 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006546 goto drop;
6547 }
6548
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006549 if (!control->sframe) {
6550 int err;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006551
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006552 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6553 control->sar, control->reqseq, control->final,
6554 control->txseq);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03006555
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006556 /* Validate F-bit - F=0 always valid, F=1 only
6557 * valid in TX WAIT_F
6558 */
6559 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006560 goto drop;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006561
6562 if (chan->mode != L2CAP_MODE_STREAMING) {
6563 event = L2CAP_EV_RECV_IFRAME;
6564 err = l2cap_rx(chan, control, skb, event);
6565 } else {
6566 err = l2cap_stream_rx(chan, control, skb);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006567 }
6568
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006569 if (err)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006570 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006571 } else {
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006572 const u8 rx_func_to_event[4] = {
6573 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6574 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6575 };
6576
6577 /* Only I-frames are expected in streaming mode */
6578 if (chan->mode == L2CAP_MODE_STREAMING)
6579 goto drop;
6580
6581 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6582 control->reqseq, control->final, control->poll,
6583 control->super);
6584
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006585 if (len != 0) {
Andrei Emeltchenko1bb166e2012-11-20 17:16:21 +02006586 BT_ERR("Trailing bytes: %d in sframe", len);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006587 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006588 goto drop;
6589 }
6590
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006591 /* Validate F and P bits */
6592 if (control->final && (control->poll ||
6593 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6594 goto drop;
6595
6596 event = rx_func_to_event[control->super];
6597 if (l2cap_rx(chan, control, skb, event))
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006598 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006599 }
6600
6601 return 0;
6602
6603drop:
6604 kfree_skb(skb);
6605 return 0;
6606}
6607
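/* Top up the peer's LE flow control credits.  Nothing is sent while
 * rx_credits is still at least half (rounded up) of le_max_credits;
 * below that threshold enough credits to get back to le_max_credits
 * are returned in an L2CAP_LE_CREDITS PDU.  For example, with
 * le_max_credits = 10 nothing happens while rx_credits >= 5, but at
 * rx_credits = 4 a credit packet for 6 credits is sent, restoring the
 * peer's view to 10.
 */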
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006608static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6609{
6610 struct l2cap_conn *conn = chan->conn;
6611 struct l2cap_le_credits pkt;
6612 u16 return_credits;
6613
6614 /* We return more credits to the sender only after the amount of
6615 * credits falls below half of the initial amount.
6616 */
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006617 if (chan->rx_credits >= (le_max_credits + 1) / 2)
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006618 return;
6619
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006620 return_credits = le_max_credits - chan->rx_credits;
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006621
6622 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6623
6624 chan->rx_credits += return_credits;
6625
6626 pkt.cid = cpu_to_le16(chan->scid);
6627 pkt.credits = cpu_to_le16(return_credits);
6628
6629 chan->ident = l2cap_get_ident(conn);
6630
6631 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6632}
6633
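/* LE connection-oriented channel receive path.  Every PDU consumes
 * one credit (running out is treated as a protocol error) and credits
 * are returned to the sender as they are used up.  The first PDU of
 * an SDU carries a 2-byte SDU length; fragments are appended until
 * the full SDU is assembled and handed to chan->ops->recv().
 * Oversized SDUs or PDUs are rejected.
 */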
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006634static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6635{
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006636 int err;
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006637
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006638 if (!chan->rx_credits) {
6639 BT_ERR("No credits to receive LE L2CAP data");
Johan Hedbergdfd97742014-01-27 15:11:34 -08006640 l2cap_send_disconn_req(chan, ECONNRESET);
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006641 return -ENOBUFS;
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006642 }
6643
6644 if (chan->imtu < skb->len) {
6645 BT_ERR("Too big LE L2CAP PDU");
6646 return -ENOBUFS;
6647 }
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006648
6649 chan->rx_credits--;
6650 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6651
6652 l2cap_chan_le_send_credits(chan);
6653
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006654 err = 0;
6655
6656 if (!chan->sdu) {
6657 u16 sdu_len;
6658
6659 sdu_len = get_unaligned_le16(skb->data);
6660 skb_pull(skb, L2CAP_SDULEN_SIZE);
6661
6662 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6663 sdu_len, skb->len, chan->imtu);
6664
6665 if (sdu_len > chan->imtu) {
6666 BT_ERR("Too big LE L2CAP SDU length received");
6667 err = -EMSGSIZE;
6668 goto failed;
6669 }
6670
6671 if (skb->len > sdu_len) {
6672 BT_ERR("Too much LE L2CAP data received");
6673 err = -EINVAL;
6674 goto failed;
6675 }
6676
6677 if (skb->len == sdu_len)
6678 return chan->ops->recv(chan, skb);
6679
6680 chan->sdu = skb;
6681 chan->sdu_len = sdu_len;
6682 chan->sdu_last_frag = skb;
6683
6684 return 0;
6685 }
6686
6687 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6688 chan->sdu->len, skb->len, chan->sdu_len);
6689
6690 if (chan->sdu->len + skb->len > chan->sdu_len) {
6691 BT_ERR("Too much LE L2CAP data received");
6692 err = -EINVAL;
6693 goto failed;
6694 }
6695
6696 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6697 skb = NULL;
6698
6699 if (chan->sdu->len == chan->sdu_len) {
6700 err = chan->ops->recv(chan, chan->sdu);
6701 if (!err) {
6702 chan->sdu = NULL;
6703 chan->sdu_last_frag = NULL;
6704 chan->sdu_len = 0;
6705 }
6706 }
6707
6708failed:
6709 if (err) {
6710 kfree_skb(skb);
6711 kfree_skb(chan->sdu);
6712 chan->sdu = NULL;
6713 chan->sdu_last_frag = NULL;
6714 chan->sdu_len = 0;
6715 }
6716
6717 /* We can't return an error here since we took care of the skb
6718 * freeing internally. An error return would cause the caller to
6719 * do a double-free of the skb.
6720 */
6721 return 0;
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006722}
6723
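/* Deliver an incoming data packet to the channel bound to the given
 * CID.  Unknown CIDs are dropped, except for the A2MP CID where a
 * channel is created on demand.  The payload is then handled
 * according to the channel mode: LE flow control, basic mode (with a
 * simple imtu check) or ERTM/streaming via l2cap_data_rcv().
 */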
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006724static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6725 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006726{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006727 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006728
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03006729 chan = l2cap_get_chan_by_scid(conn, cid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006730 if (!chan) {
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006731 if (cid == L2CAP_CID_A2MP) {
6732 chan = a2mp_channel_create(conn, skb);
6733 if (!chan) {
6734 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006735 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006736 }
6737
6738 l2cap_chan_lock(chan);
6739 } else {
6740 BT_DBG("unknown cid 0x%4.4x", cid);
6741 /* Drop packet and return */
6742 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006743 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006744 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006745 }
6746
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03006747 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006748
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03006749 if (chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006750 goto drop;
6751
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006752 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03006753 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006754 if (l2cap_le_data_rcv(chan, skb) < 0)
6755 goto drop;
6756
6757 goto done;
6758
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006759 case L2CAP_MODE_BASIC:
6760		/* If the socket receive buffer overflows we drop data here,
6761		 * which is *bad* because L2CAP has to be reliable.
6762		 * But we don't have any other choice: L2CAP doesn't
6763		 * provide a flow control mechanism in basic mode. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006764
Szymon Janc2c96e032014-02-18 20:48:34 +01006765 if (chan->imtu < skb->len) {
6766 BT_ERR("Dropping L2CAP data: receive buffer overflow");
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006767 goto drop;
Szymon Janc2c96e032014-02-18 20:48:34 +01006768 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006769
Gustavo Padovan80b98022012-05-27 22:27:51 -03006770 if (!chan->ops->recv(chan, skb))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006771 goto done;
6772 break;
6773
6774 case L2CAP_MODE_ERTM:
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006775 case L2CAP_MODE_STREAMING:
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006776 l2cap_data_rcv(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006777 goto done;
6778
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006779 default:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006780 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006781 break;
6782 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006783
6784drop:
6785 kfree_skb(skb);
6786
6787done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02006788 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006789}
6790
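/* Connectionless (G-frame) reception, valid only on ACL links.  The
 * PSM is used to find a matching listening channel, the remote
 * address and PSM are stashed in the skb control block for msg_name,
 * and the payload is handed to the channel's recv callback.
 */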
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006791static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6792 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006793{
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006794 struct hci_conn *hcon = conn->hcon;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006795 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006796
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006797 if (hcon->type != ACL_LINK)
Johan Hedberga24cce12014-08-07 22:56:42 +03006798 goto free_skb;
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006799
Johan Hedbergbf20fd42013-05-14 13:23:13 +03006800 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6801 ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006802 if (!chan)
Johan Hedberga24cce12014-08-07 22:56:42 +03006803 goto free_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006804
Andrei Emeltchenko5b4ceda2012-02-24 16:35:32 +02006805 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006806
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03006807 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006808 goto drop;
6809
Vinicius Costa Gomese13e21d2011-06-17 22:46:27 -03006810 if (chan->imtu < skb->len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006811 goto drop;
6812
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006813 /* Store remote BD_ADDR and PSM for msg_name */
Marcel Holtmann06ae3312013-10-18 03:43:00 -07006814 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006815 bt_cb(skb)->psm = psm;
6816
Johan Hedberga24cce12014-08-07 22:56:42 +03006817 if (!chan->ops->recv(chan, skb)) {
6818 l2cap_chan_put(chan);
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006819 return;
Johan Hedberga24cce12014-08-07 22:56:42 +03006820 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006821
6822drop:
Johan Hedberga24cce12014-08-07 22:56:42 +03006823 l2cap_chan_put(chan);
6824free_skb:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006825 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006826}
6827
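/* Demultiplex one complete L2CAP frame.  Frames arriving before the
 * HCI connection reaches BT_CONNECTED are queued and replayed later
 * by process_pending_rx().  Otherwise the basic header is stripped
 * and the frame is routed by CID to the BR/EDR signalling channel,
 * the connectionless channel, the LE signalling channel or a data
 * channel.  Data from blacklisted LE peers is silently discarded.
 */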
6828static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6829{
6830 struct l2cap_hdr *lh = (void *) skb->data;
Johan Hedberg61a939c2014-01-17 20:45:11 +02006831 struct hci_conn *hcon = conn->hcon;
Al Viro8e036fc2007-07-29 00:16:36 -07006832 u16 cid, len;
6833 __le16 psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006834
Johan Hedberg61a939c2014-01-17 20:45:11 +02006835 if (hcon->state != BT_CONNECTED) {
6836 BT_DBG("queueing pending rx skb");
6837 skb_queue_tail(&conn->pending_rx, skb);
6838 return;
6839 }
6840
Linus Torvalds1da177e2005-04-16 15:20:36 -07006841 skb_pull(skb, L2CAP_HDR_SIZE);
6842 cid = __le16_to_cpu(lh->cid);
6843 len = __le16_to_cpu(lh->len);
6844
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006845 if (len != skb->len) {
6846 kfree_skb(skb);
6847 return;
6848 }
6849
Johan Hedberg9e1d7e12014-07-06 11:03:36 +03006850 /* Since we can't actively block incoming LE connections we must
6851 * at least ensure that we ignore incoming data from them.
6852 */
6853 if (hcon->type == LE_LINK &&
Johan Hedbergdcc36c12014-07-09 12:59:13 +03006854 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6855 bdaddr_type(hcon, hcon->dst_type))) {
Johan Hedberge4931502014-07-02 09:36:21 +03006856 kfree_skb(skb);
6857 return;
6858 }
6859
Linus Torvalds1da177e2005-04-16 15:20:36 -07006860 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6861
6862 switch (cid) {
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03006863 case L2CAP_CID_SIGNALING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006864 l2cap_sig_channel(conn, skb);
6865 break;
6866
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03006867 case L2CAP_CID_CONN_LESS:
Andrei Emeltchenko097db762012-03-09 14:16:17 +02006868 psm = get_unaligned((__le16 *) skb->data);
Andrei Emeltchenko0181a702012-05-29 10:04:05 +03006869 skb_pull(skb, L2CAP_PSMLEN_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006870 l2cap_conless_channel(conn, psm, skb);
6871 break;
6872
Marcel Holtmanna2877622013-10-02 23:46:54 -07006873 case L2CAP_CID_LE_SIGNALING:
6874 l2cap_le_sig_channel(conn, skb);
6875 break;
6876
Linus Torvalds1da177e2005-04-16 15:20:36 -07006877 default:
6878 l2cap_data_channel(conn, cid, skb);
6879 break;
6880 }
6881}
6882
Johan Hedberg61a939c2014-01-17 20:45:11 +02006883static void process_pending_rx(struct work_struct *work)
6884{
6885 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6886 pending_rx_work);
6887 struct sk_buff *skb;
6888
6889 BT_DBG("");
6890
6891 while ((skb = skb_dequeue(&conn->pending_rx)))
6892 l2cap_recv_frame(conn, skb);
6893}
6894
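/* Create, or return the existing, l2cap_conn for an HCI connection.
 * This allocates the HCI channel, takes a reference on the hcon,
 * picks the initial MTU (le_mtu for LE links, acl_mtu otherwise) and
 * initialises the locks, channel lists and the deferred work used for
 * the info timeout, pending rx frames and identity address updates.
 */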
Johan Hedberg162b49e2014-01-17 20:45:10 +02006895static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6896{
6897 struct l2cap_conn *conn = hcon->l2cap_data;
6898 struct hci_chan *hchan;
6899
6900 if (conn)
6901 return conn;
6902
6903 hchan = hci_chan_create(hcon);
6904 if (!hchan)
6905 return NULL;
6906
Johan Hedberg27f70f32014-07-21 10:50:06 +03006907 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006908 if (!conn) {
6909 hci_chan_del(hchan);
6910 return NULL;
6911 }
6912
6913 kref_init(&conn->ref);
6914 hcon->l2cap_data = conn;
Johan Hedberg51bb84572014-08-15 21:06:57 +03006915 conn->hcon = hci_conn_get(hcon);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006916 conn->hchan = hchan;
6917
6918 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6919
6920 switch (hcon->type) {
6921 case LE_LINK:
6922 if (hcon->hdev->le_mtu) {
6923 conn->mtu = hcon->hdev->le_mtu;
6924 break;
6925 }
6926 /* fall through */
6927 default:
6928 conn->mtu = hcon->hdev->acl_mtu;
6929 break;
6930 }
6931
6932 conn->feat_mask = 0;
6933
6934 if (hcon->type == ACL_LINK)
6935 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6936 &hcon->hdev->dev_flags);
6937
Marcel Holtmann5a54e7c2014-07-13 20:50:15 +02006938 mutex_init(&conn->ident_lock);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006939 mutex_init(&conn->chan_lock);
6940
6941 INIT_LIST_HEAD(&conn->chan_l);
6942 INIT_LIST_HEAD(&conn->users);
6943
Johan Hedberg276d8072014-08-11 22:06:41 +03006944 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006945
Johan Hedberg61a939c2014-01-17 20:45:11 +02006946 skb_queue_head_init(&conn->pending_rx);
6947 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
Johan Hedbergf3d82d02014-09-05 22:19:50 +03006948 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
Johan Hedberg61a939c2014-01-17 20:45:11 +02006949
Johan Hedberg162b49e2014-01-17 20:45:10 +02006950 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
6951
6952 return conn;
6953}
6954
6955static bool is_valid_psm(u16 psm, u8 dst_type)
{
6956 if (!psm)
6957 return false;
6958
6959 if (bdaddr_type_is_le(dst_type))
6960 return (psm <= 0x00ff);
6961
6962 /* PSM must be odd and lsb of upper byte must be 0 */
6963 return ((psm & 0x0101) == 0x0001);
6964}
6965
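/* Initiate an outgoing connection for a channel.  After validating
 * the PSM/CID against the channel type and mode, an ACL or LE link is
 * requested from the HCI layer, the channel is attached to the
 * resulting l2cap_conn and the connect procedure is started; if the
 * link is already up the channel is moved towards BT_CONNECTED
 * directly.
 */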
6966int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6967 bdaddr_t *dst, u8 dst_type)
6968{
6969 struct l2cap_conn *conn;
6970 struct hci_conn *hcon;
6971 struct hci_dev *hdev;
Johan Hedberg162b49e2014-01-17 20:45:10 +02006972 int err;
6973
6974 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
6975 dst_type, __le16_to_cpu(psm));
6976
6977 hdev = hci_get_route(dst, &chan->src);
6978 if (!hdev)
6979 return -EHOSTUNREACH;
6980
6981 hci_dev_lock(hdev);
6982
Johan Hedberg162b49e2014-01-17 20:45:10 +02006983 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
6984 chan->chan_type != L2CAP_CHAN_RAW) {
6985 err = -EINVAL;
6986 goto done;
6987 }
6988
Johan Hedberg21626e62014-01-24 10:35:41 +02006989 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
6990 err = -EINVAL;
6991 goto done;
6992 }
6993
6994 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
Johan Hedberg162b49e2014-01-17 20:45:10 +02006995 err = -EINVAL;
6996 goto done;
6997 }
6998
6999 switch (chan->mode) {
7000 case L2CAP_MODE_BASIC:
7001 break;
7002 case L2CAP_MODE_LE_FLOWCTL:
7003 l2cap_le_flowctl_init(chan);
7004 break;
7005 case L2CAP_MODE_ERTM:
7006 case L2CAP_MODE_STREAMING:
7007 if (!disable_ertm)
7008 break;
7009 /* fall through */
7010 default:
Johan Hedbergbeb19e42014-07-18 11:15:26 +03007011 err = -EOPNOTSUPP;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007012 goto done;
7013 }
7014
7015 switch (chan->state) {
7016 case BT_CONNECT:
7017 case BT_CONNECT2:
7018 case BT_CONFIG:
7019 /* Already connecting */
7020 err = 0;
7021 goto done;
7022
7023 case BT_CONNECTED:
7024 /* Already connected */
7025 err = -EISCONN;
7026 goto done;
7027
7028 case BT_OPEN:
7029 case BT_BOUND:
7030 /* Can connect */
7031 break;
7032
7033 default:
7034 err = -EBADFD;
7035 goto done;
7036 }
7037
7038 /* Set destination address and psm */
7039 bacpy(&chan->dst, dst);
7040 chan->dst_type = dst_type;
7041
7042 chan->psm = psm;
7043 chan->dcid = cid;
7044
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007045 if (bdaddr_type_is_le(dst_type)) {
Johan Hedberge804d252014-07-16 11:42:28 +03007046 u8 role;
Johan Hedbergcdd62752014-07-07 15:02:28 +03007047
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007048 /* Convert from L2CAP channel address type to HCI address type
7049 */
7050 if (dst_type == BDADDR_LE_PUBLIC)
7051 dst_type = ADDR_LE_DEV_PUBLIC;
7052 else
7053 dst_type = ADDR_LE_DEV_RANDOM;
7054
Johan Hedberge804d252014-07-16 11:42:28 +03007055 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7056 role = HCI_ROLE_SLAVE;
7057 else
7058 role = HCI_ROLE_MASTER;
Johan Hedbergcdd62752014-07-07 15:02:28 +03007059
Andre Guedes04a6c582014-02-26 20:21:44 -03007060 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
Johan Hedberge804d252014-07-16 11:42:28 +03007061 HCI_LE_CONN_TIMEOUT, role);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007062 } else {
Johan Hedbergd93375a2014-07-07 15:02:27 +03007063 u8 auth_type = l2cap_get_auth_type(chan);
Andre Guedes04a6c582014-02-26 20:21:44 -03007064 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007065 }
Johan Hedberg162b49e2014-01-17 20:45:10 +02007066
7067 if (IS_ERR(hcon)) {
7068 err = PTR_ERR(hcon);
7069 goto done;
7070 }
7071
7072 conn = l2cap_conn_add(hcon);
7073 if (!conn) {
7074 hci_conn_drop(hcon);
7075 err = -ENOMEM;
7076 goto done;
7077 }
7078
Johan Hedberg02e246a2014-10-02 10:16:22 +03007079 mutex_lock(&conn->chan_lock);
7080 l2cap_chan_lock(chan);
7081
Johan Hedberg162b49e2014-01-17 20:45:10 +02007082 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7083 hci_conn_drop(hcon);
7084 err = -EBUSY;
Johan Hedberg02e246a2014-10-02 10:16:22 +03007085 goto chan_unlock;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007086 }
7087
7088 /* Update source addr of the socket */
7089 bacpy(&chan->src, &hcon->src);
7090 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7091
Johan Hedberg02e246a2014-10-02 10:16:22 +03007092 __l2cap_chan_add(conn, chan);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007093
7094 /* l2cap_chan_add takes its own ref so we can drop this one */
7095 hci_conn_drop(hcon);
7096
7097 l2cap_state_change(chan, BT_CONNECT);
7098 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7099
Johan Hedberg61202e42014-01-28 15:16:48 -08007100 /* Release chan->sport so that it can be reused by other
7101 * sockets (as it's only used for listening sockets).
7102 */
7103 write_lock(&chan_list_lock);
7104 chan->sport = 0;
7105 write_unlock(&chan_list_lock);
7106
Johan Hedberg162b49e2014-01-17 20:45:10 +02007107 if (hcon->state == BT_CONNECTED) {
7108 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7109 __clear_chan_timer(chan);
Johan Hedberge7cafc42014-07-17 15:35:38 +03007110 if (l2cap_chan_check_security(chan, true))
Johan Hedberg162b49e2014-01-17 20:45:10 +02007111 l2cap_state_change(chan, BT_CONNECTED);
7112 } else
7113 l2cap_do_start(chan);
7114 }
7115
7116 err = 0;
7117
Johan Hedberg02e246a2014-10-02 10:16:22 +03007118chan_unlock:
Johan Hedberg162b49e2014-01-17 20:45:10 +02007119 l2cap_chan_unlock(chan);
Johan Hedberg02e246a2014-10-02 10:16:22 +03007120 mutex_unlock(&conn->chan_lock);
7121done:
Johan Hedberg162b49e2014-01-17 20:45:10 +02007122 hci_dev_unlock(hdev);
7123 hci_dev_put(hdev);
7124 return err;
7125}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03007126EXPORT_SYMBOL_GPL(l2cap_chan_connect);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007127
Linus Torvalds1da177e2005-04-16 15:20:36 -07007128/* ---- L2CAP interface with lower layer (HCI) ---- */
7129
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007130int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007131{
7132 int exact = 0, lm1 = 0, lm2 = 0;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007133 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007134
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007135 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007136
7137 /* Find listening sockets and check their link_mode */
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007138 read_lock(&chan_list_lock);
7139 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03007140 if (c->state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007141 continue;
7142
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007143 if (!bacmp(&c->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007144 lm1 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007145 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007146 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007147 exact++;
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007148 } else if (!bacmp(&c->src, BDADDR_ANY)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007149 lm2 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007150 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007151 lm2 |= HCI_LM_MASTER;
7152 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007153 }
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007154 read_unlock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007155
7156 return exact ? lm1 : lm2;
7157}
7158
Johan Hedberge760ec12014-08-07 22:56:47 +03007159/* Find the next fixed channel in BT_LISTEN state, continuing the
 7160 * iteration from an existing channel in the list or from the start
 7161 * of the global list (by passing NULL as the first parameter).
 7162 */
7163static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
Johan Hedberg54a1b622014-08-07 22:56:48 +03007164 bdaddr_t *src, u8 link_type)
Johan Hedberge760ec12014-08-07 22:56:47 +03007165{
7166 read_lock(&chan_list_lock);
7167
7168 if (c)
7169 c = list_next_entry(c, global_l);
7170 else
7171 c = list_entry(chan_list.next, typeof(*c), global_l);
7172
7173 list_for_each_entry_from(c, &chan_list, global_l) {
7174 if (c->chan_type != L2CAP_CHAN_FIXED)
7175 continue;
7176 if (c->state != BT_LISTEN)
7177 continue;
7178 if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
7179 continue;
Johan Hedberg54a1b622014-08-07 22:56:48 +03007180 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7181 continue;
7182 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7183 continue;
Johan Hedberge760ec12014-08-07 22:56:47 +03007184
7185 l2cap_chan_hold(c);
7186 read_unlock(&chan_list_lock);
7187 return c;
7188 }
7189
7190 read_unlock(&chan_list_lock);
7191
7192 return NULL;
7193}
7194
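/* Called by the HCI layer once a link is established.  On failure the
 * connection and all of its channels are torn down.  On success an
 * l2cap_conn is created and, unless the peer is blacklisted, every
 * listening fixed channel matching the local address and link type is
 * offered a new channel on this connection before normal connection
 * processing continues.
 */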
Andrei Emeltchenko9e664632012-07-24 16:06:15 +03007195void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007196{
Johan Hedberge760ec12014-08-07 22:56:47 +03007197 struct hci_dev *hdev = hcon->hdev;
Marcel Holtmann01394182006-07-03 10:02:46 +02007198 struct l2cap_conn *conn;
Johan Hedberge760ec12014-08-07 22:56:47 +03007199 struct l2cap_chan *pchan;
7200 u8 dst_type;
Marcel Holtmann01394182006-07-03 10:02:46 +02007201
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007202 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007203
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007204 if (status) {
Joe Perchese1750722011-06-29 18:18:29 -07007205 l2cap_conn_del(hcon, bt_to_errno(status));
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007206 return;
Andrei Emeltchenkoba6fc312012-10-31 15:46:26 +02007207 }
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007208
7209 conn = l2cap_conn_add(hcon);
7210 if (!conn)
7211 return;
7212
Johan Hedberge760ec12014-08-07 22:56:47 +03007213 dst_type = bdaddr_type(hcon, hcon->dst_type);
7214
7215 /* If device is blocked, do not create channels for it */
7216 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7217 return;
7218
7219 /* Find fixed channels and notify them of the new connection. We
7220 * use multiple individual lookups, continuing each time where
7221 * we left off, because the list lock would prevent calling the
7222 * potentially sleeping l2cap_chan_lock() function.
7223 */
Johan Hedberg54a1b622014-08-07 22:56:48 +03007224 pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
Johan Hedberge760ec12014-08-07 22:56:47 +03007225 while (pchan) {
7226 struct l2cap_chan *chan, *next;
7227
7228 /* Client fixed channels should override server ones */
7229 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7230 goto next;
7231
7232 l2cap_chan_lock(pchan);
7233 chan = pchan->ops->new_connection(pchan);
7234 if (chan) {
7235 bacpy(&chan->src, &hcon->src);
7236 bacpy(&chan->dst, &hcon->dst);
7237 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7238 chan->dst_type = dst_type;
7239
7240 __l2cap_chan_add(conn, chan);
7241 }
7242
7243 l2cap_chan_unlock(pchan);
7244next:
Johan Hedberg54a1b622014-08-07 22:56:48 +03007245 next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
7246 hcon->type);
Johan Hedberge760ec12014-08-07 22:56:47 +03007247 l2cap_chan_put(pchan);
7248 pchan = next;
7249 }
7250
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007251 l2cap_conn_ready(conn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007252}
7253
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007254int l2cap_disconn_ind(struct hci_conn *hcon)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007255{
7256 struct l2cap_conn *conn = hcon->l2cap_data;
7257
7258 BT_DBG("hcon %p", hcon);
7259
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007260 if (!conn)
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02007261 return HCI_ERROR_REMOTE_USER_TERM;
Marcel Holtmann2950f212009-02-12 14:02:50 +01007262 return conn->disc_reason;
7263}
7264
Andrei Emeltchenko9e664632012-07-24 16:06:15 +03007265void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007266{
7267 BT_DBG("hcon %p reason %d", hcon, reason);
7268
Joe Perchese1750722011-06-29 18:18:29 -07007269 l2cap_conn_del(hcon, bt_to_errno(reason));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007270}
7271
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007272static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007273{
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03007274 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007275 return;
7276
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007277 if (encrypt == 0x00) {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007278 if (chan->sec_level == BT_SECURITY_MEDIUM) {
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007279 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
Marcel Holtmann7d513e92014-01-15 22:37:40 -08007280 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7281 chan->sec_level == BT_SECURITY_FIPS)
Gustavo F. Padovan0f852722011-05-04 19:42:50 -03007282 l2cap_chan_close(chan, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007283 } else {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007284 if (chan->sec_level == BT_SECURITY_MEDIUM)
Gustavo F. Padovanc9b66672011-05-17 14:59:01 -03007285 __clear_chan_timer(chan);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007286 }
7287}
7288
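/* Called by the HCI layer when the authentication/encryption state of
 * a link changes.  Every channel on the connection is revisited:
 * connected channels are resumed or timed out depending on the
 * result, pending outgoing connects are started or aborted, and
 * incoming connects in BT_CONNECT2 are answered with success, pending
 * authorisation or a security block.
 */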
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007289int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007290{
Marcel Holtmann40be4922008-07-14 20:13:50 +02007291 struct l2cap_conn *conn = hcon->l2cap_data;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03007292 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007293
Marcel Holtmann01394182006-07-03 10:02:46 +02007294 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007295 return 0;
Marcel Holtmann01394182006-07-03 10:02:46 +02007296
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007297 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007298
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007299 mutex_lock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007300
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007301 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007302 l2cap_chan_lock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007303
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007304 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7305 state_to_string(chan->state));
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007306
Johan Hedberg2338a7e2014-01-24 10:35:40 +02007307 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko78eb2f92012-07-19 17:03:47 +03007308 l2cap_chan_unlock(chan);
7309 continue;
7310 }
7311
Johan Hedberg191eb392014-08-07 22:56:45 +03007312 if (!status && encrypt)
7313 chan->sec_level = hcon->sec_level;
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007314
Andrei Emeltchenko96eff462012-11-15 18:14:53 +02007315 if (!__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007316 l2cap_chan_unlock(chan);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01007317 continue;
7318 }
7319
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03007320 if (!status && (chan->state == BT_CONNECTED ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01007321 chan->state == BT_CONFIG)) {
Marcel Holtmannd97c8992013-10-14 02:53:54 -07007322 chan->ops->resume(chan);
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007323 l2cap_check_encryption(chan, encrypt);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007324 l2cap_chan_unlock(chan);
Marcel Holtmann9719f8a2008-07-14 20:13:45 +02007325 continue;
7326 }
7327
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03007328 if (chan->state == BT_CONNECT) {
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007329 if (!status)
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03007330 l2cap_start_connection(chan);
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007331 else
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007332 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03007333 } else if (chan->state == BT_CONNECT2) {
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007334 struct l2cap_conn_rsp rsp;
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007335 __u16 res, stat;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007336
7337 if (!status) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07007338 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007339 res = L2CAP_CR_PEND;
7340 stat = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08007341 chan->ops->defer(chan);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007342 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007343 l2cap_state_change(chan, BT_CONFIG);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007344 res = L2CAP_CR_SUCCESS;
7345 stat = L2CAP_CS_NO_INFO;
7346 }
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007347 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007348 l2cap_state_change(chan, BT_DISCONN);
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007349 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007350 res = L2CAP_CR_SEC_BLOCK;
7351 stat = L2CAP_CS_NO_INFO;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007352 }
7353
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03007354 rsp.scid = cpu_to_le16(chan->dcid);
7355 rsp.dcid = cpu_to_le16(chan->scid);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007356 rsp.result = cpu_to_le16(res);
7357 rsp.status = cpu_to_le16(stat);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03007358 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01007359 sizeof(rsp), &rsp);
Mat Martineau2d369352012-05-23 14:59:30 -07007360
7361 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7362 res == L2CAP_CR_SUCCESS) {
7363 char buf[128];
7364 set_bit(CONF_REQ_SENT, &chan->conf_state);
7365 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7366 L2CAP_CONF_REQ,
7367 l2cap_build_conf_req(chan, buf),
7368 buf);
7369 chan->num_conf_req++;
7370 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007371 }
7372
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007373 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007374 }
7375
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007376 mutex_unlock(&conn->chan_lock);
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007377
Linus Torvalds1da177e2005-04-16 15:20:36 -07007378 return 0;
7379}
7380
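/* Reassemble ACL data fragments into complete L2CAP frames.  A start
 * fragment must contain at least the basic header, which announces
 * the total frame length; continuation fragments are copied into the
 * pending rx_skb until that length is reached, at which point the
 * complete frame is passed to l2cap_recv_frame().  Inconsistent
 * lengths mark the connection unreliable and drop the fragment.
 */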
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007381int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007382{
7383 struct l2cap_conn *conn = hcon->l2cap_data;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007384 struct l2cap_hdr *hdr;
7385 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007386
Andrei Emeltchenko1d13a252012-10-15 11:58:41 +03007387 /* For AMP controller do not create l2cap conn */
7388 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7389 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007390
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007391 if (!conn)
Claudio Takahasibaf43252013-04-11 13:55:50 -03007392 conn = l2cap_conn_add(hcon);
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007393
7394 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007395 goto drop;
7396
7397 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7398
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007399 switch (flags) {
7400 case ACL_START:
7401 case ACL_START_NO_FLUSH:
7402 case ACL_COMPLETE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007403 if (conn->rx_len) {
7404 BT_ERR("Unexpected start frame (len %d)", skb->len);
7405 kfree_skb(conn->rx_skb);
7406 conn->rx_skb = NULL;
7407 conn->rx_len = 0;
7408 l2cap_conn_unreliable(conn, ECOMM);
7409 }
7410
Andrei Emeltchenkoaae7fe22010-09-15 14:28:43 +03007411		/* A start fragment always begins with the basic L2CAP header */
7412 if (skb->len < L2CAP_HDR_SIZE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007413 BT_ERR("Frame is too short (len %d)", skb->len);
7414 l2cap_conn_unreliable(conn, ECOMM);
7415 goto drop;
7416 }
7417
7418 hdr = (struct l2cap_hdr *) skb->data;
7419 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7420
7421 if (len == skb->len) {
7422 /* Complete frame received */
7423 l2cap_recv_frame(conn, skb);
7424 return 0;
7425 }
7426
7427 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7428
7429 if (skb->len > len) {
7430 BT_ERR("Frame is too long (len %d, expected len %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007431 skb->len, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007432 l2cap_conn_unreliable(conn, ECOMM);
7433 goto drop;
7434 }
7435
7436 /* Allocate skb for the complete frame (with header) */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03007437 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03007438 if (!conn->rx_skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007439 goto drop;
7440
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007441 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007442 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007443 conn->rx_len = len - skb->len;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007444 break;
7445
7446 case ACL_CONT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007447 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7448
7449 if (!conn->rx_len) {
7450 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7451 l2cap_conn_unreliable(conn, ECOMM);
7452 goto drop;
7453 }
7454
7455 if (skb->len > conn->rx_len) {
7456 BT_ERR("Fragment is too long (len %d, expected %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007457 skb->len, conn->rx_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007458 kfree_skb(conn->rx_skb);
7459 conn->rx_skb = NULL;
7460 conn->rx_len = 0;
7461 l2cap_conn_unreliable(conn, ECOMM);
7462 goto drop;
7463 }
7464
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007465 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007466 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007467 conn->rx_len -= skb->len;
7468
7469 if (!conn->rx_len) {
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007470 /* Complete frame received. l2cap_recv_frame
7471 * takes ownership of the skb so set the global
7472 * rx_skb pointer to NULL first.
7473 */
7474 struct sk_buff *rx_skb = conn->rx_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007475 conn->rx_skb = NULL;
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007476 l2cap_recv_frame(conn, rx_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007477 }
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007478 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007479 }
7480
7481drop:
7482 kfree_skb(skb);
7483 return 0;
7484}
7485
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007486static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007487{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007488 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007489
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007490 read_lock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007491
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007492 list_for_each_entry(c, &chan_list, global_l) {
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007493 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007494 &c->src, &c->dst,
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007495 c->state, __le16_to_cpu(c->psm),
7496 c->scid, c->dcid, c->imtu, c->omtu,
7497 c->sec_level, c->mode);
Andrei Emeltchenko61e1b4b2012-01-19 11:19:50 +02007498 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007499
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007500 read_unlock(&chan_list_lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007501
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007502 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007503}
7504
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007505static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7506{
7507 return single_open(file, l2cap_debugfs_show, inode->i_private);
7508}
7509
7510static const struct file_operations l2cap_debugfs_fops = {
7511 .open = l2cap_debugfs_open,
7512 .read = seq_read,
7513 .llseek = seq_lseek,
7514 .release = single_release,
7515};
7516
7517static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007518
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007519int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007520{
7521 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007522
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007523 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007524 if (err < 0)
7525 return err;
7526
Marcel Holtmann1120e4b2013-10-17 17:24:16 -07007527 if (IS_ERR_OR_NULL(bt_debugfs))
7528 return 0;
7529
7530 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7531 NULL, &l2cap_debugfs_fops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007532
Samuel Ortiz40b93972014-05-14 17:53:35 +02007533 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007534 &le_max_credits);
Samuel Ortiz40b93972014-05-14 17:53:35 +02007535 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007536 &le_default_mps);
7537
Linus Torvalds1da177e2005-04-16 15:20:36 -07007538 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007539}
7540
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007541void l2cap_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542{
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007543 debugfs_remove(l2cap_debugfs);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007544 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545}
7546
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03007547module_param(disable_ertm, bool, 0644);
7548MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");