/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"
#include "amp.h"

#define LE_FLOWCTL_MAX_CREDITS 65535

bool disable_ertm;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);

static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
{
	if (hcon->type == LE_LINK) {
		if (type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}
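
/* Usage sketch (illustrative only): callers of the locking getters are
 * expected to drop the channel lock themselves once done, e.g.
 *
 *	chan = l2cap_get_chan_by_scid(conn, scid);
 *	if (!chan)
 *		return;
 *	// ... operate on the locked channel ...
 *	l2cap_chan_unlock(chan);
 */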
120
Mat Martineaub1a130b2012-10-23 15:24:09 -0700121/* Find channel with given DCID.
122 * Returns locked channel.
123 */
124static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
125 u16 cid)
126{
127 struct l2cap_chan *c;
128
129 mutex_lock(&conn->chan_lock);
130 c = __l2cap_get_chan_by_dcid(conn, cid);
131 if (c)
132 l2cap_chan_lock(c);
133 mutex_unlock(&conn->chan_lock);
134
135 return c;
136}
137
Gustavo Padovan2d792812012-10-06 10:07:01 +0100138static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
139 u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200140{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200141 struct l2cap_chan *c;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300142
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200143 list_for_each_entry(c, &conn->chan_l, list) {
144 if (c->ident == ident)
145 return c;
Marcel Holtmann01394182006-07-03 10:02:46 +0200146 }
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200147 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200148}
149
Mat Martineau5b155ef2012-10-23 15:24:14 -0700150static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
151 u8 ident)
152{
153 struct l2cap_chan *c;
154
155 mutex_lock(&conn->chan_lock);
156 c = __l2cap_get_chan_by_ident(conn, ident);
157 if (c)
158 l2cap_chan_lock(c);
159 mutex_unlock(&conn->chan_lock);
160
161 return c;
162}
163
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300164static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300165{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300166 struct l2cap_chan *c;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300167
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300168 list_for_each_entry(c, &chan_list, global_l) {
Marcel Holtmann7eafc592013-10-13 08:12:47 -0700169 if (c->sport == psm && !bacmp(&c->src, src))
Szymon Janc250938c2011-11-16 09:32:22 +0100170 return c;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300171 }
Szymon Janc250938c2011-11-16 09:32:22 +0100172 return NULL;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300173}
174
175int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
176{
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300177 int err;
178
Gustavo F. Padovan333055f2011-12-22 15:14:39 -0200179 write_lock(&chan_list_lock);
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300180
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300182 err = -EADDRINUSE;
183 goto done;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300184 }
185
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300186 if (psm) {
187 chan->psm = psm;
188 chan->sport = psm;
189 err = 0;
190 } else {
191 u16 p;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300192
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300193 err = -EINVAL;
194 for (p = 0x1001; p < 0x1100; p += 2)
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
198 err = 0;
199 break;
200 }
201 }
202
203done:
Gustavo F. Padovan333055f2011-12-22 15:14:39 -0200204 write_unlock(&chan_list_lock);
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300205 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300206}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300207EXPORT_SYMBOL_GPL(l2cap_add_psm);
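
/* Example (sketch): when a caller passes psm == 0, the loop above hands
 * out the first free dynamic PSM from the odd values 0x1001, 0x1003, ...
 * 0x10ff for this source address; -EINVAL is returned once that range is
 * exhausted, and -EADDRINUSE if an explicitly requested PSM is taken.
 */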

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}
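
/* Illustrative note: the scan starts at L2CAP_CID_DYN_START (0x0040), so
 * the first connection-oriented channel on a fresh link typically gets
 * source CID 0x0040, the next one 0x0041, and so on until the dynamic
 * range for the link type (BR/EDR or LE) is exhausted, in which case 0
 * is returned and no CID can be allocated.
 */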

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}

static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */
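
/* Usage sketch (illustrative only): marking two transmit sequence
 * numbers for retransmission and then draining the list looks roughly
 * like this, using the helpers defined below:
 *
 *	l2cap_seq_list_append(&chan->retrans_list, 5);
 *	l2cap_seq_list_append(&chan->retrans_list, 9);
 *	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
 *		u16 seq = l2cap_seq_list_pop(&chan->retrans_list);
 *		// look up and resend the I-frame whose txseq == seq
 *	}
 *
 * l2cap_seq_list_contains() is a single array lookup, so duplicate
 * entries (e.g. repeated SREJs for the same sequence number) can be
 * rejected in constant time.
 */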

static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}

static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}

struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);

static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}

void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);

void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
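
/* Usage sketch (illustrative only): upper layers typically allocate and
 * prepare a channel roughly like this before binding or connecting it:
 *
 *	struct l2cap_chan *chan = l2cap_chan_create();
 *
 *	if (!chan)
 *		return -ENOMEM;
 *	l2cap_chan_set_defaults(chan);
 *	chan->imtu = L2CAP_DEFAULT_MTU;
 *	// ... then l2cap_add_psm() or l2cap_add_scid(), and eventually
 *	// l2cap_chan_close()/l2cap_chan_put() to tear it down.
 */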

static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
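
/* Note (illustrative): for LE credit-based flow control the channel
 * starts with tx_credits == 0, so nothing can be sent until the peer
 * grants credits; rx_credits advertises le_max_credits to the peer, and
 * the MPS is capped at the channel's own MTU
 * (chan->mps = min(imtu, le_default_mps)), so a single PDU never
 * exceeds what this side can receive.
 */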
504
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +0300505void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
Marcel Holtmann01394182006-07-03 10:02:46 +0200506{
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -0300507 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
Andrei Emeltchenko097db762012-03-09 14:16:17 +0200508 __le16_to_cpu(chan->psm), chan->dcid);
Marcel Holtmann01394182006-07-03 10:02:46 +0200509
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +0200510 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
Marcel Holtmann2950f212009-02-12 14:02:50 +0100511
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -0300512 chan->conn = conn;
Marcel Holtmann01394182006-07-03 10:02:46 +0200513
Andrei Emeltchenko54911202012-02-06 15:04:00 +0200514 switch (chan->chan_type) {
515 case L2CAP_CHAN_CONN_ORIENTED:
Johan Hedberg21626e62014-01-24 10:35:41 +0200516 /* Alloc CID for connection-oriented socket */
517 chan->scid = l2cap_alloc_cid(conn);
518 if (conn->hcon->type == ACL_LINK)
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -0300519 chan->omtu = L2CAP_DEFAULT_MTU;
Andrei Emeltchenko54911202012-02-06 15:04:00 +0200520 break;
521
522 case L2CAP_CHAN_CONN_LESS:
Marcel Holtmann01394182006-07-03 10:02:46 +0200523 /* Connectionless socket */
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -0300524 chan->scid = L2CAP_CID_CONN_LESS;
525 chan->dcid = L2CAP_CID_CONN_LESS;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -0300526 chan->omtu = L2CAP_DEFAULT_MTU;
Andrei Emeltchenko54911202012-02-06 15:04:00 +0200527 break;
528
Johan Hedberg2338a7e2014-01-24 10:35:40 +0200529 case L2CAP_CHAN_FIXED:
530 /* Caller will set CID and CID specific MTU values */
Andrei Emeltchenko416fa752012-05-29 13:59:16 +0300531 break;
532
Andrei Emeltchenko54911202012-02-06 15:04:00 +0200533 default:
Marcel Holtmann01394182006-07-03 10:02:46 +0200534 /* Raw socket can send/recv signalling messages only */
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -0300535 chan->scid = L2CAP_CID_SIGNALING;
536 chan->dcid = L2CAP_CID_SIGNALING;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -0300537 chan->omtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann01394182006-07-03 10:02:46 +0200538 }
539
Andrei Emeltchenko8f7975b2011-10-13 16:18:54 +0300540 chan->local_id = L2CAP_BESTEFFORT_ID;
541 chan->local_stype = L2CAP_SERV_BESTEFFORT;
542 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
543 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
544 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
Andrei Emeltchenko8936fa62012-10-08 11:14:41 +0300545 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
Andrei Emeltchenko8f7975b2011-10-13 16:18:54 +0300546
Ulisses Furquim371fd832011-12-21 20:02:36 -0200547 l2cap_chan_hold(chan);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300548
Johan Hedbergc16900c2014-08-15 21:17:06 +0300549 /* Only keep a reference for fixed channels if they requested it */
550 if (chan->chan_type != L2CAP_CHAN_FIXED ||
551 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
552 hci_conn_hold(conn->hcon);
Johan Hedberg5ee98912013-04-29 19:35:43 +0300553
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200554 list_add(&chan->list, &conn->chan_l);
Andrei Emeltchenko643162a2012-02-22 17:11:55 +0200555}
556
Andrei Emeltchenko466f8002012-05-29 13:59:01 +0300557void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
Andrei Emeltchenko643162a2012-02-22 17:11:55 +0200558{
559 mutex_lock(&conn->chan_lock);
560 __l2cap_chan_add(conn, chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200561 mutex_unlock(&conn->chan_lock);
Marcel Holtmann01394182006-07-03 10:02:46 +0200562}
563
Andrei Emeltchenko466f8002012-05-29 13:59:01 +0300564void l2cap_chan_del(struct l2cap_chan *chan, int err)
Marcel Holtmann01394182006-07-03 10:02:46 +0200565{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -0300566 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann01394182006-07-03 10:02:46 +0200567
Gustavo F. Padovanc9b66672011-05-17 14:59:01 -0300568 __clear_chan_timer(chan);
Marcel Holtmann01394182006-07-03 10:02:46 +0200569
Gustavo F. Padovan49208c92011-04-04 15:59:54 -0300570 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
Marcel Holtmann01394182006-07-03 10:02:46 +0200571
Johan Hedberg72847ce2014-08-08 09:28:03 +0300572 chan->ops->teardown(chan, err);
573
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900574 if (conn) {
Andrei Emeltchenko56f60982012-10-15 11:58:44 +0300575 struct amp_mgr *mgr = conn->hcon->amp_mgr;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300576 /* Delete from channel list */
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200577 list_del(&chan->list);
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -0200578
Ulisses Furquim371fd832011-12-21 20:02:36 -0200579 l2cap_chan_put(chan);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300580
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -0300581 chan->conn = NULL;
Andrei Emeltchenko3cabbfd2012-05-31 11:01:37 +0300582
Johan Hedbergc16900c2014-08-15 21:17:06 +0300583 /* Reference was only held for non-fixed channels or
584 * fixed channels that explicitly requested it using the
585 * FLAG_HOLD_HCI_CONN flag.
586 */
587 if (chan->chan_type != L2CAP_CHAN_FIXED ||
588 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
David Herrmann76a68ba2013-04-06 20:28:37 +0200589 hci_conn_drop(conn->hcon);
Andrei Emeltchenko56f60982012-10-15 11:58:44 +0300590
591 if (mgr && mgr->bredr_chan == chan)
592 mgr->bredr_chan = NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200593 }
594
Andrei Emeltchenko419e08c2012-10-31 15:46:34 +0200595 if (chan->hs_hchan) {
596 struct hci_chan *hs_hchan = chan->hs_hchan;
597
598 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
599 amp_disconnect_logical_link(hs_hchan);
600 }
601
Mat Martineau28270112012-05-17 21:14:09 -0700602 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
Gustavo F. Padovan6ff5abb2011-04-25 15:10:41 -0300603 return;
Gustavo F. Padovan2ead70b2011-04-01 15:13:36 -0300604
Gustavo Padovanee556f62012-05-18 20:22:38 -0300605 switch(chan->mode) {
606 case L2CAP_MODE_BASIC:
607 break;
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300608
Johan Hedberg38319712013-05-17 12:49:23 +0300609 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedberg177f8f22013-05-31 17:54:51 +0300610 skb_queue_purge(&chan->tx_q);
Johan Hedberg38319712013-05-17 12:49:23 +0300611 break;
612
Gustavo Padovanee556f62012-05-18 20:22:38 -0300613 case L2CAP_MODE_ERTM:
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -0300614 __clear_retrans_timer(chan);
615 __clear_monitor_timer(chan);
616 __clear_ack_timer(chan);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300617
Gustavo F. Padovanf1c67752011-03-25 20:36:10 -0300618 skb_queue_purge(&chan->srej_q);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300619
Mat Martineau3c588192012-04-11 10:48:42 -0700620 l2cap_seq_list_free(&chan->srej_list);
621 l2cap_seq_list_free(&chan->retrans_list);
Gustavo Padovanee556f62012-05-18 20:22:38 -0300622
623 /* fall through */
624
625 case L2CAP_MODE_STREAMING:
626 skb_queue_purge(&chan->tx_q);
627 break;
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300628 }
Gustavo Padovanee556f62012-05-18 20:22:38 -0300629
630 return;
Marcel Holtmann01394182006-07-03 10:02:46 +0200631}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300632EXPORT_SYMBOL_GPL(l2cap_chan_del);
Marcel Holtmann01394182006-07-03 10:02:46 +0200633
Johan Hedbergf3d82d02014-09-05 22:19:50 +0300634static void l2cap_conn_update_id_addr(struct work_struct *work)
Johan Hedberg387a33e2014-02-18 21:41:33 +0200635{
Johan Hedbergf3d82d02014-09-05 22:19:50 +0300636 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
637 id_addr_update_work);
638 struct hci_conn *hcon = conn->hcon;
Johan Hedberg387a33e2014-02-18 21:41:33 +0200639 struct l2cap_chan *chan;
640
641 mutex_lock(&conn->chan_lock);
642
643 list_for_each_entry(chan, &conn->chan_l, list) {
644 l2cap_chan_lock(chan);
645 bacpy(&chan->dst, &hcon->dst);
646 chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
647 l2cap_chan_unlock(chan);
648 }
649
650 mutex_unlock(&conn->chan_lock);
651}
652
Johan Hedberg27e2d4c2013-05-14 13:27:21 +0300653static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
654{
655 struct l2cap_conn *conn = chan->conn;
656 struct l2cap_le_conn_rsp rsp;
657 u16 result;
658
659 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
660 result = L2CAP_CR_AUTHORIZATION;
661 else
662 result = L2CAP_CR_BAD_PSM;
663
664 l2cap_state_change(chan, BT_DISCONN);
665
666 rsp.dcid = cpu_to_le16(chan->scid);
667 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +0200668 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +0300669 rsp.credits = cpu_to_le16(chan->rx_credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +0300670 rsp.result = cpu_to_le16(result);
671
672 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
673 &rsp);
674}
675
Johan Hedberg791d60f2013-05-14 22:24:44 +0300676static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
677{
678 struct l2cap_conn *conn = chan->conn;
679 struct l2cap_conn_rsp rsp;
680 u16 result;
681
682 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
683 result = L2CAP_CR_SEC_BLOCK;
684 else
685 result = L2CAP_CR_BAD_PSM;
686
687 l2cap_state_change(chan, BT_DISCONN);
688
689 rsp.scid = cpu_to_le16(chan->dcid);
690 rsp.dcid = cpu_to_le16(chan->scid);
691 rsp.result = cpu_to_le16(result);
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700692 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Johan Hedberg791d60f2013-05-14 22:24:44 +0300693
694 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
695}
696
Gustavo F. Padovan0f852722011-05-04 19:42:50 -0300697void l2cap_chan_close(struct l2cap_chan *chan, int reason)
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300698{
699 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300700
Marcel Holtmann7eafc592013-10-13 08:12:47 -0700701 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300702
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -0300703 switch (chan->state) {
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300704 case BT_LISTEN:
Gustavo Padovanb699ec02012-10-06 11:51:54 +0100705 chan->ops->teardown(chan, 0);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300706 break;
707
708 case BT_CONNECTED:
709 case BT_CONFIG:
Johan Hedberg7b25c9b2014-01-28 15:28:04 -0800710 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
Gustavo Padovan8d836d72013-10-15 19:24:47 -0300711 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +0200712 l2cap_send_disconn_req(chan, reason);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300713 } else
714 l2cap_chan_del(chan, reason);
715 break;
716
717 case BT_CONNECT2:
Johan Hedberg791d60f2013-05-14 22:24:44 +0300718 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
719 if (conn->hcon->type == ACL_LINK)
720 l2cap_chan_connect_reject(chan);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +0300721 else if (conn->hcon->type == LE_LINK)
722 l2cap_chan_le_connect_reject(chan);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300723 }
724
725 l2cap_chan_del(chan, reason);
726 break;
727
728 case BT_CONNECT:
729 case BT_DISCONN:
730 l2cap_chan_del(chan, reason);
731 break;
732
733 default:
Gustavo Padovanb699ec02012-10-06 11:51:54 +0100734 chan->ops->teardown(chan, 0);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300735 break;
736 }
737}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300738EXPORT_SYMBOL(l2cap_chan_close);
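
/* Example of the state handling above (illustrative): closing a
 * connected, connection-oriented channel arms the channel timer and
 * sends an L2CAP Disconnect Request, while closing a channel still in
 * BT_CONNECT2 (an incoming connection waiting for the upper layer)
 * rejects the pending request instead and deletes the channel right
 * away.
 */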

static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
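
/* Example mapping (illustrative): an ordinary connection-oriented
 * channel requested at BT_SECURITY_HIGH ends up asking HCI for
 * HCI_AT_GENERAL_BONDING_MITM, whereas a channel on the SDP PSM is
 * demoted from BT_SECURITY_LOW to BT_SECURITY_SDP and never requests
 * bonding at all.
 */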
790
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200791/* Service level security */
Johan Hedberge7cafc42014-07-17 15:35:38 +0300792int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200793{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -0300794 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100795 __u8 auth_type;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200796
Johan Hedberga17de2f2013-05-14 13:25:37 +0300797 if (conn->hcon->type == LE_LINK)
798 return smp_conn_security(conn->hcon, chan->sec_level);
799
Gustavo F. Padovan43434782011-04-12 18:31:57 -0300800 auth_type = l2cap_get_auth_type(chan);
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100801
Johan Hedberge7cafc42014-07-17 15:35:38 +0300802 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
803 initiator);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200804}
805
Johannes Bergb5ad8b72011-06-01 08:54:45 +0200806static u8 l2cap_get_ident(struct l2cap_conn *conn)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200807{
808 u8 id;
809
810 /* Get next available identificator.
811 * 1 - 128 are used by kernel.
812 * 129 - 199 are reserved.
813 * 200 - 254 are used by utilities like l2ping, etc.
814 */
815
Marcel Holtmann5a54e7c2014-07-13 20:50:15 +0200816 mutex_lock(&conn->ident_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200817
818 if (++conn->tx_ident > 128)
819 conn->tx_ident = 1;
820
821 id = conn->tx_ident;
822
Marcel Holtmann5a54e7c2014-07-13 20:50:15 +0200823 mutex_unlock(&conn->ident_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200824
825 return id;
826}
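
/* Example (illustrative, assuming tx_ident starts at zero on a freshly
 * allocated connection): the first signalling command gets ident 1, the
 * next one 2, and so on; after 128 the counter wraps back to 1, so the
 * kernel never emits idents in the 129-254 range reserved above for
 * other users.
 */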
827
Gustavo Padovan2d792812012-10-06 10:07:01 +0100828static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
829 void *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200830{
831 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200832 u8 flags;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200833
834 BT_DBG("code 0x%2.2x", code);
835
836 if (!skb)
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -0300837 return;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200838
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200839 if (lmp_no_flush_capable(conn->hcon->hdev))
840 flags = ACL_START_NO_FLUSH;
841 else
842 flags = ACL_START;
843
Jaikumar Ganesh14b12d02011-05-23 18:06:04 -0700844 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +0200845 skb->priority = HCI_PRIO_MAX;
Jaikumar Ganesh14b12d02011-05-23 18:06:04 -0700846
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +0200847 hci_send_acl(conn->hchan, skb, flags);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200848}
849
Mat Martineau02b0fbb2012-10-23 15:24:10 -0700850static bool __chan_is_moving(struct l2cap_chan *chan)
851{
852 return chan->move_state != L2CAP_MOVE_STABLE &&
853 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
854}
855
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +0200856static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
857{
858 struct hci_conn *hcon = chan->conn->hcon;
859 u16 flags;
860
861 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
Gustavo Padovan2d792812012-10-06 10:07:01 +0100862 skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +0200863
Mat Martineaud5f8a752012-10-23 15:24:18 -0700864 if (chan->hs_hcon && !__chan_is_moving(chan)) {
865 if (chan->hs_hchan)
866 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
867 else
868 kfree_skb(skb);
869
870 return;
871 }
872
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +0200873 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +0100874 lmp_no_flush_capable(hcon->hdev))
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +0200875 flags = ACL_START_NO_FLUSH;
876 else
877 flags = ACL_START;
878
879 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
880 hci_send_acl(chan->conn->hchan, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700881}
882
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700883static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
884{
885 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
886 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
887
888 if (enh & L2CAP_CTRL_FRAME_TYPE) {
889 /* S-Frame */
890 control->sframe = 1;
891 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
892 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
893
894 control->sar = 0;
895 control->txseq = 0;
896 } else {
897 /* I-Frame */
898 control->sframe = 0;
899 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
900 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
901
902 control->poll = 0;
903 control->super = 0;
904 }
905}
906
907static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
908{
909 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
910 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
911
912 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
913 /* S-Frame */
914 control->sframe = 1;
915 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
916 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
917
918 control->sar = 0;
919 control->txseq = 0;
920 } else {
921 /* I-Frame */
922 control->sframe = 0;
923 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
924 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
925
926 control->poll = 0;
927 control->super = 0;
928 }
929}
930
931static inline void __unpack_control(struct l2cap_chan *chan,
932 struct sk_buff *skb)
933{
934 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
935 __unpack_extended_control(get_unaligned_le32(skb->data),
936 &bt_cb(skb)->control);
Mat Martineaucec8ab6e2012-05-17 20:53:36 -0700937 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700938 } else {
939 __unpack_enhanced_control(get_unaligned_le16(skb->data),
940 &bt_cb(skb)->control);
Mat Martineaucec8ab6e2012-05-17 20:53:36 -0700941 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700942 }
943}
944
945static u32 __pack_extended_control(struct l2cap_ctrl *control)
946{
947 u32 packed;
948
949 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
950 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
951
952 if (control->sframe) {
953 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
954 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
955 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
956 } else {
957 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
958 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
959 }
960
961 return packed;
962}
963
964static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
965{
966 u16 packed;
967
968 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
969 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
970
971 if (control->sframe) {
972 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
973 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
974 packed |= L2CAP_CTRL_FRAME_TYPE;
975 } else {
976 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
977 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
978 }
979
980 return packed;
981}
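
/* Example (illustrative): building the enhanced control field for an RR
 * S-frame that acknowledges everything up to reqseq 3 boils down to
 *
 *	struct l2cap_ctrl control = {
 *		.sframe = 1,
 *		.super  = L2CAP_SUPER_RR,
 *		.reqseq = 3,
 *	};
 *	u16 field = __pack_enhanced_control(&control);
 *
 * and __unpack_enhanced_control(field, &control) recovers the same
 * values; the extended (32-bit) variants mirror this for channels with
 * FLAG_EXT_CTRL set.
 */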

static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}

static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}

static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
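
/* Resulting S-frame layout (illustrative), as assembled above for a
 * channel using CRC16:
 *
 *	+---------+---------+------------------+---------+
 *	| len (2) | cid (2) | control (2 or 4) | FCS (2) |
 *	+---------+---------+------------------+---------+
 *
 * with len covering everything after the basic L2CAP header and the FCS
 * computed over the header and control field.
 */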

static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}

static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}

static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return true;

	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}

static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}

static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}

void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}

static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}

static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}

static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}

static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}

static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}

static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}

static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1285
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001286static void l2cap_request_info(struct l2cap_conn *conn)
1287{
1288 struct l2cap_info_req req;
1289
1290 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1291 return;
1292
1293 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1294
1295 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1296 conn->info_ident = l2cap_get_ident(conn);
1297
1298 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1299
1300 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1301 sizeof(req), &req);
1302}
1303
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001304static void l2cap_do_start(struct l2cap_chan *chan)
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001305{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03001306 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001307
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001308 if (conn->hcon->type == LE_LINK) {
Johan Hedberg96ac34f2013-05-13 11:15:07 +03001309 l2cap_le_start(chan);
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001310 return;
1311 }
1312
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001313 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1314 l2cap_request_info(conn);
1315 return;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001316 }
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001317
1318 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1319 return;
1320
1321 if (l2cap_chan_check_security(chan, true) &&
1322 __l2cap_no_conn_pending(chan))
1323 l2cap_start_connection(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001324}
1325
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001326static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1327{
1328 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03001329 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001330 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1331
1332 switch (mode) {
1333 case L2CAP_MODE_ERTM:
1334 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1335 case L2CAP_MODE_STREAMING:
1336 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1337 default:
1338 return 0x00;
1339 }
1340}
1341
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001342static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001343{
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001344 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001345 struct l2cap_disconn_req req;
1346
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001347 if (!conn)
1348 return;
1349
Andrei Emeltchenkoaad3d0e2012-09-06 15:05:42 +03001350 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001351 __clear_retrans_timer(chan);
1352 __clear_monitor_timer(chan);
1353 __clear_ack_timer(chan);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001354 }
1355
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001356 if (chan->scid == L2CAP_CID_A2MP) {
Gustavo Padovand1177732012-10-06 11:47:38 +01001357 l2cap_state_change(chan, BT_DISCONN);
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001358 return;
1359 }
1360
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001361 req.dcid = cpu_to_le16(chan->dcid);
1362 req.scid = cpu_to_le16(chan->scid);
Gustavo Padovan2d792812012-10-06 10:07:01 +01001363 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1364 sizeof(req), &req);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001365
Gustavo Padovanf8e73012013-10-15 19:24:46 -03001366 l2cap_state_change_and_error(chan, BT_DISCONN, err);
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001367}
1368
Linus Torvalds1da177e2005-04-16 15:20:36 -07001369/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001370static void l2cap_conn_start(struct l2cap_conn *conn)
1371{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001372 struct l2cap_chan *chan, *tmp;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001373
1374 BT_DBG("conn %p", conn);
1375
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001376 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001377
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001378 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001379 l2cap_chan_lock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001380
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03001381 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001382 l2cap_chan_ready(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001383 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001384 continue;
1385 }
1386
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001387 if (chan->state == BT_CONNECT) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03001388 if (!l2cap_chan_check_security(chan, true) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001389 !__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001390 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001391 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02001392 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001393
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001394 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
Gustavo Padovan2d792812012-10-06 10:07:01 +01001395 && test_bit(CONF_STATE2_DEVICE,
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001396 &chan->conf_state)) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001397 l2cap_chan_close(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001398 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001399 continue;
1400 }
1401
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001402 l2cap_start_connection(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001403
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001404 } else if (chan->state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001405 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001406 char buf[128];
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001407 rsp.scid = cpu_to_le16(chan->dcid);
1408 rsp.dcid = cpu_to_le16(chan->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001409
Johan Hedberge7cafc42014-07-17 15:35:38 +03001410 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07001411 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001412 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1413 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08001414 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001415
1416 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02001417 l2cap_state_change(chan, BT_CONFIG);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001418 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1419 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001420 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001421 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001422 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1423 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001424 }
1425
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001426 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001427 sizeof(rsp), &rsp);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001428
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001429 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001430 rsp.result != L2CAP_CR_SUCCESS) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001431 l2cap_chan_unlock(chan);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001432 continue;
1433 }
1434
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001435 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001436 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001437 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03001438 chan->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001439 }
1440
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001441 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001442 }
1443
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001444 mutex_unlock(&conn->chan_lock);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001445}
1446
Ville Tervob62f3282011-02-10 22:38:50 -03001447static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1448{
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001449 struct hci_conn *hcon = conn->hcon;
Johan Hedbergdcc36c12014-07-09 12:59:13 +03001450 struct hci_dev *hdev = hcon->hdev;
Ville Tervob62f3282011-02-10 22:38:50 -03001451
Johan Hedberge760ec12014-08-07 22:56:47 +03001452 BT_DBG("%s conn %p", hdev->name, conn);
Ville Tervob62f3282011-02-10 22:38:50 -03001453
Johan Hedberge760ec12014-08-07 22:56:47 +03001454 /* For outgoing pairing which doesn't necessarily have an
1455 * associated socket (e.g. mgmt_pair_device).
1456 */
1457 if (hcon->out)
1458 smp_conn_security(hcon, hcon->pending_sec_level);
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001459
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001460 /* For LE slave connections, make sure the connection interval
1461 * is in the range of the minimum and maximum intervals that have
1462 * been configured for this connection. If not, then trigger
1463 * the connection update procedure.
1464 */
Johan Hedberg40bef302014-07-16 11:42:27 +03001465 if (hcon->role == HCI_ROLE_SLAVE &&
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001466 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1467 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1468 struct l2cap_conn_param_update_req req;
1469
1470 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1471 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1472 req.latency = cpu_to_le16(hcon->le_conn_latency);
1473 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1474
1475 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1476 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1477 }
Ville Tervob62f3282011-02-10 22:38:50 -03001478}
1479
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001480static void l2cap_conn_ready(struct l2cap_conn *conn)
1481{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001482 struct l2cap_chan *chan;
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001483 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001484
1485 BT_DBG("conn %p", conn);
1486
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001487 if (hcon->type == ACL_LINK)
1488 l2cap_request_info(conn);
1489
Johan Hedberge760ec12014-08-07 22:56:47 +03001490 mutex_lock(&conn->chan_lock);
1491
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001492 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03001493
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001494 l2cap_chan_lock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001495
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001496 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001497 l2cap_chan_unlock(chan);
1498 continue;
1499 }
1500
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001501 if (hcon->type == LE_LINK) {
Johan Hedbergf1496de2013-05-13 14:15:56 +03001502 l2cap_le_start(chan);
Vinicius Costa Gomes63128452011-06-17 22:46:26 -03001503 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001504 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1505 l2cap_chan_ready(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001506 } else if (chan->state == BT_CONNECT) {
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001507 l2cap_do_start(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001508 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001509
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001510 l2cap_chan_unlock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001511 }
1512
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001513 mutex_unlock(&conn->chan_lock);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001514
Johan Hedberg79a05722014-08-08 09:28:04 +03001515 if (hcon->type == LE_LINK)
1516 l2cap_le_conn_ready(conn);
1517
Johan Hedberg61a939c2014-01-17 20:45:11 +02001518 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001519}
1520
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001521/* Notify sockets that we cannot guarantee reliability anymore */
1522static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1523{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001524 struct l2cap_chan *chan;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001525
1526 BT_DBG("conn %p", conn);
1527
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001528 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001529
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001530 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenkoecf61bd2011-10-11 14:04:32 +03001531 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
Gustavo Padovan1d8b1fd2012-10-06 11:34:52 +01001532 l2cap_chan_set_err(chan, err);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001533 }
1534
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001535 mutex_unlock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001536}
1537
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001538static void l2cap_info_timeout(struct work_struct *work)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001539{
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001540 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001541 info_timer.work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001542
Marcel Holtmann984947d2009-02-06 23:35:19 +01001543 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001544 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001545
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001546 l2cap_conn_start(conn);
1547}
1548
David Herrmann2c8e1412013-04-06 20:28:45 +02001549/*
1550 * l2cap_user
1551 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1552 * callback is called during registration. The ->remove callback is called
1553 * during unregistration.
1554 * An l2cap_user object is unregistered either explicitly or implicitly when
1555 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1556 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1557 * External modules must own a reference to the l2cap_conn object if they intend
1558 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1559 * any time if they don't.
1560 */
1561
1562int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1563{
1564 struct hci_dev *hdev = conn->hcon->hdev;
1565 int ret;
1566
1567 /* We need to check whether l2cap_conn is registered. If it is not, we
1568 * must not register the l2cap_user. l2cap_conn_del() unregisters
1569 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1570 * relies on the parent hci_conn object to be locked. This itself relies
1571 * on the hci_dev object to be locked. So we must lock the hci device
1572 * here, too. */
1573
1574 hci_dev_lock(hdev);
1575
1576 if (user->list.next || user->list.prev) {
1577 ret = -EINVAL;
1578 goto out_unlock;
1579 }
1580
1581 /* conn->hchan is NULL after l2cap_conn_del() was called */
1582 if (!conn->hchan) {
1583 ret = -ENODEV;
1584 goto out_unlock;
1585 }
1586
1587 ret = user->probe(conn, user);
1588 if (ret)
1589 goto out_unlock;
1590
1591 list_add(&user->list, &conn->users);
1592 ret = 0;
1593
1594out_unlock:
1595 hci_dev_unlock(hdev);
1596 return ret;
1597}
1598EXPORT_SYMBOL(l2cap_register_user);
1599
1600void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1601{
1602 struct hci_dev *hdev = conn->hcon->hdev;
1603
1604 hci_dev_lock(hdev);
1605
1606 if (!user->list.next || !user->list.prev)
1607 goto out_unlock;
1608
1609 list_del(&user->list);
1610 user->list.next = NULL;
1611 user->list.prev = NULL;
1612 user->remove(conn, user);
1613
1614out_unlock:
1615 hci_dev_unlock(hdev);
1616}
1617EXPORT_SYMBOL(l2cap_unregister_user);
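
/* Illustrative usage sketch (editorial example, not part of the original
 * file): how a hypothetical external module might use the l2cap_user API
 * documented above. The my_* names are invented for the example; only
 * l2cap_register_user(), l2cap_unregister_user(), l2cap_conn_get() and
 * l2cap_conn_put() are real interfaces.
 */
static int my_user_probe(struct l2cap_conn *conn, struct l2cap_user *user)
{
	/* conn->hcon and conn->hchan stay valid until ->remove is called */
	return 0;
}

static void my_user_remove(struct l2cap_conn *conn, struct l2cap_user *user)
{
	/* Drop any per-connection state owned by this user */
}

static struct l2cap_user my_user = {
	.probe  = my_user_probe,
	.remove = my_user_remove,
};

static int my_attach(struct l2cap_conn *conn)
{
	int err;

	/* Hold a reference for as long as l2cap_unregister_user() might be
	 * called later; otherwise the l2cap_conn may vanish at any time.
	 */
	l2cap_conn_get(conn);

	err = l2cap_register_user(conn, &my_user);
	if (err)
		l2cap_conn_put(conn);

	return err;
}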
1618
1619static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1620{
1621 struct l2cap_user *user;
1622
1623 while (!list_empty(&conn->users)) {
1624 user = list_first_entry(&conn->users, struct l2cap_user, list);
1625 list_del(&user->list);
1626 user->list.next = NULL;
1627 user->list.prev = NULL;
1628 user->remove(conn, user);
1629 }
1630}
1631
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001632static void l2cap_conn_del(struct hci_conn *hcon, int err)
1633{
1634 struct l2cap_conn *conn = hcon->l2cap_data;
1635 struct l2cap_chan *chan, *l;
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001636
1637 if (!conn)
1638 return;
1639
1640 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1641
1642 kfree_skb(conn->rx_skb);
1643
Johan Hedberg61a939c2014-01-17 20:45:11 +02001644 skb_queue_purge(&conn->pending_rx);
Jukka Taimisto7ab56c32014-06-12 10:15:13 +00001645
1646 /* We cannot call flush_work(&conn->pending_rx_work) here since we
1647 * might block if we are running on a worker from the same workqueue
1648 * pending_rx_work is waiting on.
1649 */
1650 if (work_pending(&conn->pending_rx_work))
1651 cancel_work_sync(&conn->pending_rx_work);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001652
Johan Hedbergf3d82d02014-09-05 22:19:50 +03001653 if (work_pending(&conn->id_addr_update_work))
1654 cancel_work_sync(&conn->id_addr_update_work);
1655
David Herrmann2c8e1412013-04-06 20:28:45 +02001656 l2cap_unregister_all_users(conn);
1657
Johan Hedberge31fb862014-08-18 20:33:28 +03001658 /* Force the connection to be immediately dropped */
1659 hcon->disc_timeout = 0;
1660
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001661 mutex_lock(&conn->chan_lock);
1662
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001663 /* Kill channels */
1664 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
Mat Martineau61d6ef32012-04-27 16:50:50 -07001665 l2cap_chan_hold(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001666 l2cap_chan_lock(chan);
1667
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001668 l2cap_chan_del(chan, err);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001669
1670 l2cap_chan_unlock(chan);
1671
Gustavo Padovan80b98022012-05-27 22:27:51 -03001672 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07001673 l2cap_chan_put(chan);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001674 }
1675
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001676 mutex_unlock(&conn->chan_lock);
1677
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001678 hci_chan_del(conn->hchan);
1679
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001680 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
Ulisses Furquim127074b2012-01-30 18:26:29 -02001681 cancel_delayed_work_sync(&conn->info_timer);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001682
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001683 hcon->l2cap_data = NULL;
David Herrmann9c903e32013-04-06 20:28:44 +02001684 conn->hchan = NULL;
1685 l2cap_conn_put(conn);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001686}
1687
David Herrmann9c903e32013-04-06 20:28:44 +02001688static void l2cap_conn_free(struct kref *ref)
1689{
1690 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1691
1692 hci_conn_put(conn->hcon);
1693 kfree(conn);
1694}
1695
Johan Hedberg51bb84572014-08-15 21:06:57 +03001696struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
David Herrmann9c903e32013-04-06 20:28:44 +02001697{
1698 kref_get(&conn->ref);
Johan Hedberg51bb84572014-08-15 21:06:57 +03001699 return conn;
David Herrmann9c903e32013-04-06 20:28:44 +02001700}
1701EXPORT_SYMBOL(l2cap_conn_get);
1702
1703void l2cap_conn_put(struct l2cap_conn *conn)
1704{
1705 kref_put(&conn->ref, l2cap_conn_free);
1706}
1707EXPORT_SYMBOL(l2cap_conn_put);
1708
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710
Ido Yarivc2287682012-04-20 15:46:07 -03001711/* Find channel with psm and source / destination bdaddr.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 * Returns closest match.
1713 */
Ido Yarivc2287682012-04-20 15:46:07 -03001714static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1715 bdaddr_t *src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001716 bdaddr_t *dst,
1717 u8 link_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001719 struct l2cap_chan *c, *c1 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001720
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001721 read_lock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001722
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001723 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001724 if (state && c->state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 continue;
1726
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001727 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1728 continue;
1729
1730 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1731 continue;
1732
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001733 if (c->psm == psm) {
Ido Yarivc2287682012-04-20 15:46:07 -03001734 int src_match, dst_match;
1735 int src_any, dst_any;
1736
Linus Torvalds1da177e2005-04-16 15:20:36 -07001737 /* Exact match. */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001738 src_match = !bacmp(&c->src, src);
1739 dst_match = !bacmp(&c->dst, dst);
Ido Yarivc2287682012-04-20 15:46:07 -03001740 if (src_match && dst_match) {
Johan Hedberga24cce12014-08-07 22:56:42 +03001741 l2cap_chan_hold(c);
Johannes Berga7567b22011-06-01 08:29:54 +02001742 read_unlock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001743 return c;
1744 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745
1746 /* Closest match */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001747 src_any = !bacmp(&c->src, BDADDR_ANY);
1748 dst_any = !bacmp(&c->dst, BDADDR_ANY);
Ido Yarivc2287682012-04-20 15:46:07 -03001749 if ((src_match && dst_any) || (src_any && dst_match) ||
1750 (src_any && dst_any))
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001751 c1 = c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752 }
1753 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754
Johan Hedberga24cce12014-08-07 22:56:42 +03001755 if (c1)
1756 l2cap_chan_hold(c1);
1757
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001758 read_unlock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001759
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001760 return c1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761}
1762
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001763static void l2cap_monitor_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001764{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001765 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001766 monitor_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001767
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03001768 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001769
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001770 l2cap_chan_lock(chan);
1771
Mat Martineau80909e02012-05-17 20:53:50 -07001772 if (!chan->conn) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001773 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001774 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001775 return;
1776 }
1777
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001778 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001779
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001780 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001781 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001782}
1783
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001784static void l2cap_retrans_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001785{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001786 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001787 retrans_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001788
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03001789 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001790
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001791 l2cap_chan_lock(chan);
1792
Mat Martineau80909e02012-05-17 20:53:50 -07001793 if (!chan->conn) {
1794 l2cap_chan_unlock(chan);
1795 l2cap_chan_put(chan);
1796 return;
1797 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001798
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001799 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001800 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001801 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001802}
1803
Gustavo Padovand6603662012-05-21 13:58:22 -03001804static void l2cap_streaming_send(struct l2cap_chan *chan,
1805 struct sk_buff_head *skbs)
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001806{
Gustavo F. Padovanccbb84a2010-08-30 18:44:44 -03001807 struct sk_buff *skb;
Mat Martineau37339372012-05-17 20:53:33 -07001808 struct l2cap_ctrl *control;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001809
Mat Martineau37339372012-05-17 20:53:33 -07001810 BT_DBG("chan %p, skbs %p", chan, skbs);
1811
Mat Martineaub99e13a2012-10-23 15:24:19 -07001812 if (__chan_is_moving(chan))
1813 return;
1814
Mat Martineau37339372012-05-17 20:53:33 -07001815 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1816
1817 while (!skb_queue_empty(&chan->tx_q)) {
1818
1819 skb = skb_dequeue(&chan->tx_q);
1820
1821 bt_cb(skb)->control.retries = 1;
1822 control = &bt_cb(skb)->control;
1823
1824 control->reqseq = 0;
1825 control->txseq = chan->next_tx_seq;
1826
1827 __pack_control(chan, control, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001828
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001829 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau37339372012-05-17 20:53:33 -07001830 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1831 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001832 }
1833
Gustavo F. Padovan43434782011-04-12 18:31:57 -03001834 l2cap_do_send(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001835
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001836 BT_DBG("Sent txseq %u", control->txseq);
Mat Martineau37339372012-05-17 20:53:33 -07001837
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001838 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau37339372012-05-17 20:53:33 -07001839 chan->frames_sent++;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001840 }
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001841}
1842
Szymon Janc67c9e842011-07-28 16:24:33 +02001843static int l2cap_ertm_send(struct l2cap_chan *chan)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001844{
1845 struct sk_buff *skb, *tx_skb;
Mat Martineau18a48e72012-05-17 20:53:34 -07001846 struct l2cap_ctrl *control;
1847 int sent = 0;
1848
1849 BT_DBG("chan %p", chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001850
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001851 if (chan->state != BT_CONNECTED)
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001852 return -ENOTCONN;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001853
Mat Martineau94122bb2012-05-02 09:42:02 -07001854 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1855 return 0;
1856
Mat Martineaub99e13a2012-10-23 15:24:19 -07001857 if (__chan_is_moving(chan))
1858 return 0;
1859
Mat Martineau18a48e72012-05-17 20:53:34 -07001860 while (chan->tx_send_head &&
1861 chan->unacked_frames < chan->remote_tx_win &&
1862 chan->tx_state == L2CAP_TX_STATE_XMIT) {
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001863
Mat Martineau18a48e72012-05-17 20:53:34 -07001864 skb = chan->tx_send_head;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001865
Mat Martineau18a48e72012-05-17 20:53:34 -07001866 bt_cb(skb)->control.retries = 1;
1867 control = &bt_cb(skb)->control;
Gustavo F. Padovan95ffa972010-06-18 20:37:33 -03001868
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001869 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
Mat Martineau18a48e72012-05-17 20:53:34 -07001870 control->final = 1;
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001871
Mat Martineau18a48e72012-05-17 20:53:34 -07001872 control->reqseq = chan->buffer_seq;
1873 chan->last_acked_seq = chan->buffer_seq;
1874 control->txseq = chan->next_tx_seq;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001875
Mat Martineau18a48e72012-05-17 20:53:34 -07001876 __pack_control(chan, control, skb);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001877
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001878 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau18a48e72012-05-17 20:53:34 -07001879 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1880 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001881 }
1882
Mat Martineau18a48e72012-05-17 20:53:34 -07001883 /* Clone after data has been modified. Data is assumed to be
1884 read-only (for locking purposes) on cloned sk_buffs.
1885 */
1886 tx_skb = skb_clone(skb, GFP_KERNEL);
1887
1888 if (!tx_skb)
1889 break;
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -03001890
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001891 __set_retrans_timer(chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001892
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001893 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau18a48e72012-05-17 20:53:34 -07001894 chan->unacked_frames++;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03001895 chan->frames_sent++;
Mat Martineau18a48e72012-05-17 20:53:34 -07001896 sent++;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001897
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001898 if (skb_queue_is_last(&chan->tx_q, skb))
1899 chan->tx_send_head = NULL;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001900 else
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001901 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
Mat Martineau18a48e72012-05-17 20:53:34 -07001902
1903 l2cap_do_send(chan, tx_skb);
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001904 BT_DBG("Sent txseq %u", control->txseq);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001905 }
1906
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001907 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1908 chan->unacked_frames, skb_queue_len(&chan->tx_q));
Mat Martineau18a48e72012-05-17 20:53:34 -07001909
1910 return sent;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001911}
1912
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001913static void l2cap_ertm_resend(struct l2cap_chan *chan)
1914{
1915 struct l2cap_ctrl control;
1916 struct sk_buff *skb;
1917 struct sk_buff *tx_skb;
1918 u16 seq;
1919
1920 BT_DBG("chan %p", chan);
1921
1922 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1923 return;
1924
Mat Martineaub99e13a2012-10-23 15:24:19 -07001925 if (__chan_is_moving(chan))
1926 return;
1927
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001928 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1929 seq = l2cap_seq_list_pop(&chan->retrans_list);
1930
1931 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1932 if (!skb) {
1933 BT_DBG("Error: Can't retransmit seq %d, frame missing",
Gustavo Padovan2d792812012-10-06 10:07:01 +01001934 seq);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001935 continue;
1936 }
1937
1938 bt_cb(skb)->control.retries++;
1939 control = bt_cb(skb)->control;
1940
1941 if (chan->max_tx != 0 &&
1942 bt_cb(skb)->control.retries > chan->max_tx) {
1943 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001944 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001945 l2cap_seq_list_clear(&chan->retrans_list);
1946 break;
1947 }
1948
1949 control.reqseq = chan->buffer_seq;
1950 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1951 control.final = 1;
1952 else
1953 control.final = 0;
1954
1955 if (skb_cloned(skb)) {
1956 /* Cloned sk_buffs are read-only, so we need a
1957 * writeable copy
1958 */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03001959 tx_skb = skb_copy(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001960 } else {
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03001961 tx_skb = skb_clone(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001962 }
1963
1964 if (!tx_skb) {
1965 l2cap_seq_list_clear(&chan->retrans_list);
1966 break;
1967 }
1968
1969 /* Update skb contents */
1970 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1971 put_unaligned_le32(__pack_extended_control(&control),
1972 tx_skb->data + L2CAP_HDR_SIZE);
1973 } else {
1974 put_unaligned_le16(__pack_enhanced_control(&control),
1975 tx_skb->data + L2CAP_HDR_SIZE);
1976 }
1977
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02001978 /* Update FCS */
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001979 if (chan->fcs == L2CAP_FCS_CRC16) {
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02001980 u16 fcs = crc16(0, (u8 *) tx_skb->data,
1981 tx_skb->len - L2CAP_FCS_SIZE);
1982 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
1983 L2CAP_FCS_SIZE);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001984 }
1985
1986 l2cap_do_send(chan, tx_skb);
1987
1988 BT_DBG("Resent txseq %d", control.txseq);
1989
1990 chan->last_acked_seq = chan->buffer_seq;
1991 }
1992}
1993
Mat Martineauf80842a2012-05-17 20:53:46 -07001994static void l2cap_retransmit(struct l2cap_chan *chan,
1995 struct l2cap_ctrl *control)
1996{
1997 BT_DBG("chan %p, control %p", chan, control);
1998
1999 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2000 l2cap_ertm_resend(chan);
2001}
2002
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002003static void l2cap_retransmit_all(struct l2cap_chan *chan,
2004 struct l2cap_ctrl *control)
2005{
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002006 struct sk_buff *skb;
2007
2008 BT_DBG("chan %p, control %p", chan, control);
2009
2010 if (control->poll)
2011 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2012
2013 l2cap_seq_list_clear(&chan->retrans_list);
2014
2015 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2016 return;
2017
2018 if (chan->unacked_frames) {
2019 skb_queue_walk(&chan->tx_q, skb) {
2020 if (bt_cb(skb)->control.txseq == control->reqseq ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01002021 skb == chan->tx_send_head)
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002022 break;
2023 }
2024
2025 skb_queue_walk_from(&chan->tx_q, skb) {
2026 if (skb == chan->tx_send_head)
2027 break;
2028
2029 l2cap_seq_list_append(&chan->retrans_list,
2030 bt_cb(skb)->control.txseq);
2031 }
2032
2033 l2cap_ertm_resend(chan);
2034 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002035}
2036
Szymon Jancb17e73b2012-01-11 10:59:47 +01002037static void l2cap_send_ack(struct l2cap_chan *chan)
2038{
Mat Martineau0a0aba42012-05-17 20:53:39 -07002039 struct l2cap_ctrl control;
2040 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2041 chan->last_acked_seq);
2042 int threshold;
2043
2044 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2045 chan, chan->last_acked_seq, chan->buffer_seq);
2046
2047 memset(&control, 0, sizeof(control));
2048 control.sframe = 1;
2049
2050 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2051 chan->rx_state == L2CAP_RX_STATE_RECV) {
2052 __clear_ack_timer(chan);
2053 control.super = L2CAP_SUPER_RNR;
2054 control.reqseq = chan->buffer_seq;
2055 l2cap_send_sframe(chan, &control);
2056 } else {
2057 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2058 l2cap_ertm_send(chan);
2059 /* If any i-frames were sent, they included an ack */
2060 if (chan->buffer_seq == chan->last_acked_seq)
2061 frames_to_ack = 0;
2062 }
2063
Mat Martineauc20f8e32012-07-10 05:47:07 -07002064 /* Ack now if the window is 3/4ths full.
Mat Martineau0a0aba42012-05-17 20:53:39 -07002065 * Calculate without mul or div
2066 */
Mat Martineauc20f8e32012-07-10 05:47:07 -07002067 threshold = chan->ack_win;
Mat Martineau0a0aba42012-05-17 20:53:39 -07002068 threshold += threshold << 1;
2069 threshold >>= 2;
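		/* Illustrative arithmetic: with ack_win = 8 this computes
		 * 8 + (8 << 1) = 24, then 24 >> 2 = 6, i.e. roughly 3/4 of
		 * the ack window without a multiply or divide.
		 */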
2070
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002071 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
Mat Martineau0a0aba42012-05-17 20:53:39 -07002072 threshold);
2073
2074 if (frames_to_ack >= threshold) {
2075 __clear_ack_timer(chan);
2076 control.super = L2CAP_SUPER_RR;
2077 control.reqseq = chan->buffer_seq;
2078 l2cap_send_sframe(chan, &control);
2079 frames_to_ack = 0;
2080 }
2081
2082 if (frames_to_ack)
2083 __set_ack_timer(chan);
2084 }
Szymon Jancb17e73b2012-01-11 10:59:47 +01002085}
2086
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002087static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2088 struct msghdr *msg, int len,
2089 int count, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002090{
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002091 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002092 struct sk_buff **frag;
Gustavo Padovan90338942012-04-06 20:15:47 -03002093 int sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094
Jukka Rissanen04988782014-06-18 16:37:07 +03002095 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2096 msg->msg_iov, count))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002097 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
2099 sent += count;
2100 len -= count;
2101
2102 /* Continuation fragments (no L2CAP header) */
2103 frag = &skb_shinfo(skb)->frag_list;
2104 while (len) {
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002105 struct sk_buff *tmp;
2106
Linus Torvalds1da177e2005-04-16 15:20:36 -07002107 count = min_t(unsigned int, conn->mtu, len);
2108
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002109 tmp = chan->ops->alloc_skb(chan, 0, count,
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002110 msg->msg_flags & MSG_DONTWAIT);
2111 if (IS_ERR(tmp))
2112 return PTR_ERR(tmp);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002113
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002114 *frag = tmp;
2115
Jukka Rissanen04988782014-06-18 16:37:07 +03002116 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2117 msg->msg_iov, count))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002118 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119
2120 sent += count;
2121 len -= count;
2122
Gustavo Padovan2d0ed3d2012-05-11 13:16:12 -03002123 skb->len += (*frag)->len;
2124 skb->data_len += (*frag)->len;
2125
Linus Torvalds1da177e2005-04-16 15:20:36 -07002126 frag = &(*frag)->next;
2127 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002128
2129 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002130}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002132static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002133 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002134{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002135 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002136 struct sk_buff *skb;
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002137 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002138 struct l2cap_hdr *lh;
2139
Marcel Holtmann8d463212014-06-05 15:22:51 +02002140 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2141 __le16_to_cpu(chan->psm), len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002142
2143 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002144
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002145 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002146 msg->msg_flags & MSG_DONTWAIT);
2147 if (IS_ERR(skb))
2148 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002149
2150 /* Create L2CAP header */
2151 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002152 lh->cid = cpu_to_le16(chan->dcid);
Andrei Emeltchenkodaf6a78c2012-05-03 10:55:52 +03002153 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
Marcel Holtmann43b1b8d2013-10-12 06:01:26 -07002154 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002155
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002156 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002157 if (unlikely(err < 0)) {
2158 kfree_skb(skb);
2159 return ERR_PTR(err);
2160 }
2161 return skb;
2162}
2163
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002164static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002165 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002166{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002167 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002168 struct sk_buff *skb;
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002169 int err, count;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002170 struct l2cap_hdr *lh;
2171
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002172 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002173
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002174 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002175
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002176 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002177 msg->msg_flags & MSG_DONTWAIT);
2178 if (IS_ERR(skb))
2179 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002180
2181 /* Create L2CAP header */
2182 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002183 lh->cid = cpu_to_le16(chan->dcid);
Gustavo Padovan6ff9b5e2012-05-02 11:56:17 -03002184 lh->len = cpu_to_le16(len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002185
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002186 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002187 if (unlikely(err < 0)) {
2188 kfree_skb(skb);
2189 return ERR_PTR(err);
2190 }
2191 return skb;
2192}
2193
Luiz Augusto von Dentzab0ff762011-09-12 20:00:50 +03002194static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002195 struct msghdr *msg, size_t len,
2196 u16 sdulen)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002197{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002198 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002199 struct sk_buff *skb;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002200 int err, count, hlen;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002201 struct l2cap_hdr *lh;
2202
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002203 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002204
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03002205 if (!conn)
2206 return ERR_PTR(-ENOTCONN);
2207
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002208 hlen = __ertm_hdr_size(chan);
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002209
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002210 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002211 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002212
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03002213 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002214 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03002215
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002216 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002217
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002218 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002219 msg->msg_flags & MSG_DONTWAIT);
2220 if (IS_ERR(skb))
2221 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002222
2223 /* Create L2CAP header */
2224 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002225 lh->cid = cpu_to_le16(chan->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002226 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002227
Mat Martineau18a48e72012-05-17 20:53:34 -07002228 /* Control header is populated later */
2229 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2230 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2231 else
2232 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002233
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002234 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002235 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002236
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002237 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002238 if (unlikely(err < 0)) {
2239 kfree_skb(skb);
2240 return ERR_PTR(err);
2241 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03002242
Mat Martineau18a48e72012-05-17 20:53:34 -07002243 bt_cb(skb)->control.fcs = chan->fcs;
Mat Martineau3ce35142012-04-25 16:36:14 -07002244 bt_cb(skb)->control.retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002245 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002246}
2247
Mat Martineau94122bb2012-05-02 09:42:02 -07002248static int l2cap_segment_sdu(struct l2cap_chan *chan,
2249 struct sk_buff_head *seg_queue,
2250 struct msghdr *msg, size_t len)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002251{
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002252 struct sk_buff *skb;
Mat Martineau94122bb2012-05-02 09:42:02 -07002253 u16 sdu_len;
2254 size_t pdu_len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002255 u8 sar;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002256
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002257 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002258
Mat Martineau94122bb2012-05-02 09:42:02 -07002259 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2260 * so fragmented skbs are not used. The HCI layer's handling
2261 * of fragmented skbs is not compatible with ERTM's queueing.
2262 */
2263
2264 /* PDU size is derived from the HCI MTU */
2265 pdu_len = chan->conn->mtu;
2266
Mat Martineaua5495742012-10-23 15:24:21 -07002267 /* Constrain PDU size for BR/EDR connections */
2268 if (!chan->hs_hcon)
2269 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
Mat Martineau94122bb2012-05-02 09:42:02 -07002270
2271 /* Adjust for largest possible L2CAP overhead. */
Gustavo Padovan35d401d2012-05-25 18:57:05 -03002272 if (chan->fcs)
2273 pdu_len -= L2CAP_FCS_SIZE;
2274
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002275 pdu_len -= __ertm_hdr_size(chan);
Mat Martineau94122bb2012-05-02 09:42:02 -07002276
2277 /* Remote device may have requested smaller PDUs */
2278 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2279
2280 if (len <= pdu_len) {
2281 sar = L2CAP_SAR_UNSEGMENTED;
2282 sdu_len = 0;
2283 pdu_len = len;
2284 } else {
2285 sar = L2CAP_SAR_START;
2286 sdu_len = len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002287 }
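	/* Illustrative example: with pdu_len = 100, a 250 byte SDU is emitted
	 * as a START I-frame (SDU length 250 plus 100 bytes of data), a
	 * CONTINUE I-frame with 100 bytes and an END I-frame with the
	 * remaining 50 bytes.
	 */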
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002288
2289 while (len > 0) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002290 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002291
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002292 if (IS_ERR(skb)) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002293 __skb_queue_purge(seg_queue);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002294 return PTR_ERR(skb);
2295 }
2296
Mat Martineau94122bb2012-05-02 09:42:02 -07002297 bt_cb(skb)->control.sar = sar;
2298 __skb_queue_tail(seg_queue, skb);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002299
Mat Martineau94122bb2012-05-02 09:42:02 -07002300 len -= pdu_len;
Lukasz Rymanowski069cb272014-08-13 16:01:41 +02002301 if (sdu_len)
Mat Martineau94122bb2012-05-02 09:42:02 -07002302 sdu_len = 0;
Mat Martineau94122bb2012-05-02 09:42:02 -07002303
2304 if (len <= pdu_len) {
2305 sar = L2CAP_SAR_END;
2306 pdu_len = len;
2307 } else {
2308 sar = L2CAP_SAR_CONTINUE;
2309 }
2310 }
2311
Gustavo Padovanf0f62792012-05-29 13:29:17 -03002312 return 0;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002313}
2314
Johan Hedberg177f8f22013-05-31 17:54:51 +03002315static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2316 struct msghdr *msg,
2317 size_t len, u16 sdulen)
2318{
2319 struct l2cap_conn *conn = chan->conn;
2320 struct sk_buff *skb;
2321 int err, count, hlen;
2322 struct l2cap_hdr *lh;
2323
2324 BT_DBG("chan %p len %zu", chan, len);
2325
2326 if (!conn)
2327 return ERR_PTR(-ENOTCONN);
2328
2329 hlen = L2CAP_HDR_SIZE;
2330
2331 if (sdulen)
2332 hlen += L2CAP_SDULEN_SIZE;
2333
2334 count = min_t(unsigned int, (conn->mtu - hlen), len);
2335
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002336 skb = chan->ops->alloc_skb(chan, hlen, count,
Johan Hedberg177f8f22013-05-31 17:54:51 +03002337 msg->msg_flags & MSG_DONTWAIT);
2338 if (IS_ERR(skb))
2339 return skb;
2340
2341 /* Create L2CAP header */
2342 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2343 lh->cid = cpu_to_le16(chan->dcid);
2344 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2345
2346 if (sdulen)
2347 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2348
2349 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2350 if (unlikely(err < 0)) {
2351 kfree_skb(skb);
2352 return ERR_PTR(err);
2353 }
2354
2355 return skb;
2356}
2357
2358static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2359 struct sk_buff_head *seg_queue,
2360 struct msghdr *msg, size_t len)
2361{
2362 struct sk_buff *skb;
2363 size_t pdu_len;
2364 u16 sdu_len;
2365
2366 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2367
Johan Hedberg177f8f22013-05-31 17:54:51 +03002368 sdu_len = len;
Johan Hedberg72c6fb92014-08-15 21:06:51 +03002369 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
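	/* Illustrative example: with remote_mps = 23 the first PDU carries the
	 * 2-byte SDU length plus 21 bytes of data; once the length has been
	 * sent, pdu_len is bumped back to 23 below for the remaining PDUs.
	 */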
Johan Hedberg177f8f22013-05-31 17:54:51 +03002370
2371 while (len > 0) {
2372 if (len <= pdu_len)
2373 pdu_len = len;
2374
2375 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2376 if (IS_ERR(skb)) {
2377 __skb_queue_purge(seg_queue);
2378 return PTR_ERR(skb);
2379 }
2380
2381 __skb_queue_tail(seg_queue, skb);
2382
2383 len -= pdu_len;
2384
2385 if (sdu_len) {
2386 sdu_len = 0;
2387 pdu_len += L2CAP_SDULEN_SIZE;
2388 }
2389 }
2390
2391 return 0;
2392}
2393
Marcel Holtmann8d463212014-06-05 15:22:51 +02002394int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002395{
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002396 struct sk_buff *skb;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002397 int err;
Mat Martineau94122bb2012-05-02 09:42:02 -07002398 struct sk_buff_head seg_queue;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002399
Seung-Woo Kim31e8ce82013-11-05 18:46:33 +09002400 if (!chan->conn)
2401 return -ENOTCONN;
2402
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002403 /* Connectionless channel */
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002404 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
Marcel Holtmann8d463212014-06-05 15:22:51 +02002405 skb = l2cap_create_connless_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002406 if (IS_ERR(skb))
2407 return PTR_ERR(skb);
2408
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002409 /* Channel lock is released before requesting new skb and then
2410 * reacquired thus we need to recheck channel state.
2411 */
2412 if (chan->state != BT_CONNECTED) {
2413 kfree_skb(skb);
2414 return -ENOTCONN;
2415 }
2416
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002417 l2cap_do_send(chan, skb);
2418 return len;
2419 }
2420
2421 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03002422 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedberg177f8f22013-05-31 17:54:51 +03002423 /* Check outgoing MTU */
2424 if (len > chan->omtu)
2425 return -EMSGSIZE;
2426
Johan Hedbergfad5fc82013-12-05 09:45:01 +02002427 if (!chan->tx_credits)
2428 return -EAGAIN;
2429
Johan Hedberg177f8f22013-05-31 17:54:51 +03002430 __skb_queue_head_init(&seg_queue);
2431
2432 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2433
2434 if (chan->state != BT_CONNECTED) {
2435 __skb_queue_purge(&seg_queue);
2436 err = -ENOTCONN;
2437 }
2438
2439 if (err)
2440 return err;
2441
2442 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2443
2444 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2445 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2446 chan->tx_credits--;
2447 }
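		/* One credit is consumed per PDU; anything still queued on tx_q
		 * stays there until the peer returns credits (assumed to arrive
		 * via an LE flow control credit packet), and the channel is
		 * suspended below in the meantime.
		 */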
2448
2449 if (!chan->tx_credits)
2450 chan->ops->suspend(chan);
2451
2452 err = len;
2453
2454 break;
2455
Johan Hedbergfad5fc82013-12-05 09:45:01 +02002456 case L2CAP_MODE_BASIC:
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002457 /* Check outgoing MTU */
2458 if (len > chan->omtu)
2459 return -EMSGSIZE;
2460
2461 /* Create a basic PDU */
Marcel Holtmann8d463212014-06-05 15:22:51 +02002462 skb = l2cap_create_basic_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002463 if (IS_ERR(skb))
2464 return PTR_ERR(skb);
2465
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002466 /* Channel lock is released before requesting new skb and then
2467 * reacquired thus we need to recheck channel state.
2468 */
2469 if (chan->state != BT_CONNECTED) {
2470 kfree_skb(skb);
2471 return -ENOTCONN;
2472 }
2473
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002474 l2cap_do_send(chan, skb);
2475 err = len;
2476 break;
2477
2478 case L2CAP_MODE_ERTM:
2479 case L2CAP_MODE_STREAMING:
Mat Martineau94122bb2012-05-02 09:42:02 -07002480 /* Check outgoing MTU */
2481 if (len > chan->omtu) {
2482 err = -EMSGSIZE;
2483 break;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002484 }
2485
Mat Martineau94122bb2012-05-02 09:42:02 -07002486 __skb_queue_head_init(&seg_queue);
2487
2488 /* Do segmentation before calling in to the state machine,
2489 * since it's possible to block while waiting for memory
2490 * allocation.
2491 */
2492 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2493
2494 /* The channel could have been closed while segmenting,
2495 * check that it is still connected.
2496 */
2497 if (chan->state != BT_CONNECTED) {
2498 __skb_queue_purge(&seg_queue);
2499 err = -ENOTCONN;
2500 }
2501
2502 if (err)
2503 break;
2504
Mat Martineau37339372012-05-17 20:53:33 -07002505 if (chan->mode == L2CAP_MODE_ERTM)
Gustavo Padovand6603662012-05-21 13:58:22 -03002506 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
Mat Martineau37339372012-05-17 20:53:33 -07002507 else
Gustavo Padovand6603662012-05-21 13:58:22 -03002508 l2cap_streaming_send(chan, &seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002509
Gustavo Padovand6603662012-05-21 13:58:22 -03002510 err = len;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002511
Mat Martineau94122bb2012-05-02 09:42:02 -07002512 /* If the skbs were not queued for sending, they'll still be in
2513 * seg_queue and need to be purged.
2514 */
2515 __skb_queue_purge(&seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002516 break;
2517
2518 default:
2519 BT_DBG("bad state %1.1x", chan->mode);
2520 err = -EBADFD;
2521 }
2522
2523 return err;
2524}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03002525EXPORT_SYMBOL_GPL(l2cap_chan_send);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002526
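/* Request retransmission of every I-frame missing between the expected
 * tx sequence number and txseq: send one SREJ S-frame per gap and record
 * each requested sequence number in the channel's SREJ list.
 */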
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002527static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2528{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002529 struct l2cap_ctrl control;
2530 u16 seq;
2531
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002532 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002533
2534 memset(&control, 0, sizeof(control));
2535 control.sframe = 1;
2536 control.super = L2CAP_SUPER_SREJ;
2537
2538 for (seq = chan->expected_tx_seq; seq != txseq;
2539 seq = __next_seq(chan, seq)) {
2540 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2541 control.reqseq = seq;
2542 l2cap_send_sframe(chan, &control);
2543 l2cap_seq_list_append(&chan->srej_list, seq);
2544 }
2545 }
2546
2547 chan->expected_tx_seq = __next_seq(chan, txseq);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002548}
2549
2550static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2551{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002552 struct l2cap_ctrl control;
2553
2554 BT_DBG("chan %p", chan);
2555
2556 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2557 return;
2558
2559 memset(&control, 0, sizeof(control));
2560 control.sframe = 1;
2561 control.super = L2CAP_SUPER_SREJ;
2562 control.reqseq = chan->srej_list.tail;
2563 l2cap_send_sframe(chan, &control);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002564}
2565
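/* Walk the SREJ list once, re-sending an SREJ S-frame for every
 * outstanding sequence number up to (but not including) txseq and
 * re-queueing it at the end of the list.
 */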
2566static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2567{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002568 struct l2cap_ctrl control;
2569 u16 initial_head;
2570 u16 seq;
2571
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002572 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002573
2574 memset(&control, 0, sizeof(control));
2575 control.sframe = 1;
2576 control.super = L2CAP_SUPER_SREJ;
2577
2578 /* Capture initial list head to allow only one pass through the list. */
2579 initial_head = chan->srej_list.head;
2580
2581 do {
2582 seq = l2cap_seq_list_pop(&chan->srej_list);
2583 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2584 break;
2585
2586 control.reqseq = seq;
2587 l2cap_send_sframe(chan, &control);
2588 l2cap_seq_list_append(&chan->srej_list, seq);
2589 } while (chan->srej_list.head != initial_head);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002590}
2591
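/* Release acknowledged I-frames: everything from the expected ack
 * sequence up to (but not including) reqseq has been acked by the peer,
 * so drop those skbs from the tx queue and stop the retransmission
 * timer once no unacked frames remain.
 */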
Mat Martineau608bcc62012-05-17 20:53:32 -07002592static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2593{
2594 struct sk_buff *acked_skb;
2595 u16 ackseq;
2596
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002597 BT_DBG("chan %p, reqseq %u", chan, reqseq);
Mat Martineau608bcc62012-05-17 20:53:32 -07002598
2599 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2600 return;
2601
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002602 BT_DBG("expected_ack_seq %u, unacked_frames %u",
Mat Martineau608bcc62012-05-17 20:53:32 -07002603 chan->expected_ack_seq, chan->unacked_frames);
2604
2605 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2606 ackseq = __next_seq(chan, ackseq)) {
2607
2608 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2609 if (acked_skb) {
2610 skb_unlink(acked_skb, &chan->tx_q);
2611 kfree_skb(acked_skb);
2612 chan->unacked_frames--;
2613 }
2614 }
2615
2616 chan->expected_ack_seq = reqseq;
2617
2618 if (chan->unacked_frames == 0)
2619 __clear_retrans_timer(chan);
2620
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002621 BT_DBG("unacked_frames %u", chan->unacked_frames);
Mat Martineau608bcc62012-05-17 20:53:32 -07002622}
2623
2624static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2625{
2626 BT_DBG("chan %p", chan);
2627
2628 chan->expected_tx_seq = chan->buffer_seq;
2629 l2cap_seq_list_clear(&chan->srej_list);
2630 skb_queue_purge(&chan->srej_q);
2631 chan->rx_state = L2CAP_RX_STATE_RECV;
2632}
2633
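/* ERTM transmit state machine, XMIT state: new data is queued and sent
 * immediately, local busy conditions are signalled via RR/RNR, and an
 * explicit poll or retransmission timeout moves the channel to the
 * WAIT_F state.
 */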
Gustavo Padovand6603662012-05-21 13:58:22 -03002634static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2635 struct l2cap_ctrl *control,
2636 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002637{
Mat Martineau608bcc62012-05-17 20:53:32 -07002638 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2639 event);
2640
2641 switch (event) {
2642 case L2CAP_EV_DATA_REQUEST:
2643 if (chan->tx_send_head == NULL)
2644 chan->tx_send_head = skb_peek(skbs);
2645
2646 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2647 l2cap_ertm_send(chan);
2648 break;
2649 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2650 BT_DBG("Enter LOCAL_BUSY");
2651 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2652
2653 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2654 /* The SREJ_SENT state must be aborted if we are to
2655 * enter the LOCAL_BUSY state.
2656 */
2657 l2cap_abort_rx_srej_sent(chan);
2658 }
2659
2660 l2cap_send_ack(chan);
2661
2662 break;
2663 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2664 BT_DBG("Exit LOCAL_BUSY");
2665 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2666
2667 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2668 struct l2cap_ctrl local_control;
2669
2670 memset(&local_control, 0, sizeof(local_control));
2671 local_control.sframe = 1;
2672 local_control.super = L2CAP_SUPER_RR;
2673 local_control.poll = 1;
2674 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002675 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002676
2677 chan->retry_count = 1;
2678 __set_monitor_timer(chan);
2679 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2680 }
2681 break;
2682 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2683 l2cap_process_reqseq(chan, control->reqseq);
2684 break;
2685 case L2CAP_EV_EXPLICIT_POLL:
2686 l2cap_send_rr_or_rnr(chan, 1);
2687 chan->retry_count = 1;
2688 __set_monitor_timer(chan);
2689 __clear_ack_timer(chan);
2690 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2691 break;
2692 case L2CAP_EV_RETRANS_TO:
2693 l2cap_send_rr_or_rnr(chan, 1);
2694 chan->retry_count = 1;
2695 __set_monitor_timer(chan);
2696 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2697 break;
2698 case L2CAP_EV_RECV_FBIT:
2699 /* Nothing to process */
2700 break;
2701 default:
2702 break;
2703 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002704}
2705
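/* ERTM transmit state machine, WAIT_F state: data is queued but not
 * sent until the peer answers the outstanding poll with the F-bit set,
 * at which point transmission resumes in the XMIT state. Monitor
 * timeouts retry the poll up to max_tx times before disconnecting.
 */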
Gustavo Padovand6603662012-05-21 13:58:22 -03002706static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2707 struct l2cap_ctrl *control,
2708 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002709{
Mat Martineau608bcc62012-05-17 20:53:32 -07002710 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2711 event);
2712
2713 switch (event) {
2714 case L2CAP_EV_DATA_REQUEST:
2715 if (chan->tx_send_head == NULL)
2716 chan->tx_send_head = skb_peek(skbs);
2717 /* Queue data, but don't send. */
2718 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2719 break;
2720 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2721 BT_DBG("Enter LOCAL_BUSY");
2722 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2723
2724 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2725 /* The SREJ_SENT state must be aborted if we are to
2726 * enter the LOCAL_BUSY state.
2727 */
2728 l2cap_abort_rx_srej_sent(chan);
2729 }
2730
2731 l2cap_send_ack(chan);
2732
2733 break;
2734 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2735 BT_DBG("Exit LOCAL_BUSY");
2736 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2737
2738 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2739 struct l2cap_ctrl local_control;
2740 memset(&local_control, 0, sizeof(local_control));
2741 local_control.sframe = 1;
2742 local_control.super = L2CAP_SUPER_RR;
2743 local_control.poll = 1;
2744 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002745 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002746
2747 chan->retry_count = 1;
2748 __set_monitor_timer(chan);
2749 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2750 }
2751 break;
2752 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2753 l2cap_process_reqseq(chan, control->reqseq);
2754
2755 /* Fall through */
2756
2757 case L2CAP_EV_RECV_FBIT:
2758 if (control && control->final) {
2759 __clear_monitor_timer(chan);
2760 if (chan->unacked_frames > 0)
2761 __set_retrans_timer(chan);
2762 chan->retry_count = 0;
2763 chan->tx_state = L2CAP_TX_STATE_XMIT;
2764 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2765 }
2766 break;
2767 case L2CAP_EV_EXPLICIT_POLL:
2768 /* Ignore */
2769 break;
2770 case L2CAP_EV_MONITOR_TO:
2771 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2772 l2cap_send_rr_or_rnr(chan, 1);
2773 __set_monitor_timer(chan);
2774 chan->retry_count++;
2775 } else {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02002776 l2cap_send_disconn_req(chan, ECONNABORTED);
Mat Martineau608bcc62012-05-17 20:53:32 -07002777 }
2778 break;
2779 default:
2780 break;
2781 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002782}
2783
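/* Entry point of the ERTM transmit state machine: dispatch the event to
 * the handler for the channel's current tx state.
 */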
Gustavo Padovand6603662012-05-21 13:58:22 -03002784static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2785 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002786{
Mat Martineau608bcc62012-05-17 20:53:32 -07002787 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2788 chan, control, skbs, event, chan->tx_state);
2789
2790 switch (chan->tx_state) {
2791 case L2CAP_TX_STATE_XMIT:
Gustavo Padovand6603662012-05-21 13:58:22 -03002792 l2cap_tx_state_xmit(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002793 break;
2794 case L2CAP_TX_STATE_WAIT_F:
Gustavo Padovand6603662012-05-21 13:58:22 -03002795 l2cap_tx_state_wait_f(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002796 break;
2797 default:
2798 /* Ignore event */
2799 break;
2800 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002801}
2802
Mat Martineau4b51dae92012-05-17 20:53:37 -07002803static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2804 struct l2cap_ctrl *control)
2805{
2806 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002807 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
Mat Martineau4b51dae92012-05-17 20:53:37 -07002808}
2809
Mat Martineauf80842a2012-05-17 20:53:46 -07002810static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2811 struct l2cap_ctrl *control)
2812{
2813 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002814 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
Mat Martineauf80842a2012-05-17 20:53:46 -07002815}
2816
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817/* Copy frame to all raw sockets on that connection */
2818static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2819{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820 struct sk_buff *nskb;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03002821 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002822
2823 BT_DBG("conn %p", conn);
2824
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002825 mutex_lock(&conn->chan_lock);
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002826
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002827 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002828 if (chan->chan_type != L2CAP_CHAN_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 continue;
2830
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002831 /* Don't send frame to the channel it came from */
2832 if (bt_cb(skb)->chan == chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 continue;
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002834
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002835 nskb = skb_clone(skb, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03002836 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 continue;
Gustavo Padovan80b98022012-05-27 22:27:51 -03002838 if (chan->ops->recv(chan, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002839 kfree_skb(nskb);
2840 }
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002841
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002842 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843}
2844
2845/* ---- L2CAP signalling commands ---- */
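/* Build an skb carrying a single signalling command: L2CAP header,
 * command header and payload, fragmented to the connection MTU.
 */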
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002846static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2847 u8 ident, u16 dlen, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002848{
2849 struct sk_buff *skb, **frag;
2850 struct l2cap_cmd_hdr *cmd;
2851 struct l2cap_hdr *lh;
2852 int len, count;
2853
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002854 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2855 conn, code, ident, dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002856
Anderson Lizardo300b9622013-06-02 16:30:40 -04002857 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2858 return NULL;
2859
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2861 count = min_t(unsigned int, conn->mtu, len);
2862
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002863 skb = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 if (!skb)
2865 return NULL;
2866
2867 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002868 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002869
2870 if (conn->hcon->type == LE_LINK)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002871 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002872 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002873 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874
2875 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2876 cmd->code = code;
2877 cmd->ident = ident;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002878 cmd->len = cpu_to_le16(dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879
2880 if (dlen) {
2881 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2882 memcpy(skb_put(skb, count), data, count);
2883 data += count;
2884 }
2885
2886 len -= skb->len;
2887
2888 /* Continuation fragments (no L2CAP header) */
2889 frag = &skb_shinfo(skb)->frag_list;
2890 while (len) {
2891 count = min_t(unsigned int, conn->mtu, len);
2892
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002893 *frag = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 if (!*frag)
2895 goto fail;
2896
2897 memcpy(skb_put(*frag, count), data, count);
2898
2899 len -= count;
2900 data += count;
2901
2902 frag = &(*frag)->next;
2903 }
2904
2905 return skb;
2906
2907fail:
2908 kfree_skb(skb);
2909 return NULL;
2910}
2911
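/* Extract the next configuration option from the buffer at *ptr,
 * filling in its type, length and value and returning the number of
 * bytes consumed.
 */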
Gustavo Padovan2d792812012-10-06 10:07:01 +01002912static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2913 unsigned long *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002914{
2915 struct l2cap_conf_opt *opt = *ptr;
2916 int len;
2917
2918 len = L2CAP_CONF_OPT_SIZE + opt->len;
2919 *ptr += len;
2920
2921 *type = opt->type;
2922 *olen = opt->len;
2923
2924 switch (opt->len) {
2925 case 1:
2926 *val = *((u8 *) opt->val);
2927 break;
2928
2929 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002930 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931 break;
2932
2933 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002934 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 break;
2936
2937 default:
2938 *val = (unsigned long) opt->val;
2939 break;
2940 }
2941
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002942 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943 return len;
2944}
2945
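/* Append a configuration option (type, length, value) at *ptr and
 * advance the pointer past it.
 */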
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2947{
2948 struct l2cap_conf_opt *opt = *ptr;
2949
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002950 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951
2952 opt->type = type;
2953 opt->len = len;
2954
2955 switch (len) {
2956 case 1:
2957 *((u8 *) opt->val) = val;
2958 break;
2959
2960 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002961 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002962 break;
2963
2964 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002965 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 break;
2967
2968 default:
2969 memcpy(opt->val, (void *) val, len);
2970 break;
2971 }
2972
2973 *ptr += L2CAP_CONF_OPT_SIZE + len;
2974}
2975
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002976static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2977{
2978 struct l2cap_conf_efs efs;
2979
Szymon Janc1ec918c2011-11-16 09:32:21 +01002980 switch (chan->mode) {
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002981 case L2CAP_MODE_ERTM:
2982 efs.id = chan->local_id;
2983 efs.stype = chan->local_stype;
2984 efs.msdu = cpu_to_le16(chan->local_msdu);
2985 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002986 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2987 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002988 break;
2989
2990 case L2CAP_MODE_STREAMING:
2991 efs.id = 1;
2992 efs.stype = L2CAP_SERV_BESTEFFORT;
2993 efs.msdu = cpu_to_le16(chan->local_msdu);
2994 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2995 efs.acc_lat = 0;
2996 efs.flush_to = 0;
2997 break;
2998
2999 default:
3000 return;
3001 }
3002
3003 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
Andrei Emeltchenko8936fa62012-10-08 11:14:41 +03003004 (unsigned long) &efs);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003005}
3006
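/* Ack timer handler: if received I-frames are still unacknowledged,
 * send an RR (or RNR while locally busy) to acknowledge them.
 */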
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003007static void l2cap_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003008{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003009 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau03625202012-05-17 20:53:51 -07003010 ack_timer.work);
3011 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003012
Gustavo F. Padovan2fb9b3d2011-12-22 16:56:05 -02003013 BT_DBG("chan %p", chan);
3014
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003015 l2cap_chan_lock(chan);
3016
Mat Martineau03625202012-05-17 20:53:51 -07003017 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3018 chan->last_acked_seq);
3019
3020 if (frames_to_ack)
3021 l2cap_send_rr_or_rnr(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003022
3023 l2cap_chan_unlock(chan);
Szymon Janc09bfb2e2012-01-11 10:59:49 +01003024 l2cap_chan_put(chan);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003025}
3026
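/* Reset the channel's sequence numbers, SDU reassembly state and move
 * state and, for ERTM mode, initialize the rx/tx state machines, the
 * retransmission, monitor and ack timers, and the SREJ/retransmission
 * sequence lists.
 */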
Andrei Emeltchenko466f8002012-05-29 13:59:01 +03003027int l2cap_ertm_init(struct l2cap_chan *chan)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003028{
Mat Martineau3c588192012-04-11 10:48:42 -07003029 int err;
3030
Mat Martineau105bdf92012-04-27 16:50:48 -07003031 chan->next_tx_seq = 0;
3032 chan->expected_tx_seq = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003033 chan->expected_ack_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003034 chan->unacked_frames = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003035 chan->buffer_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003036 chan->frames_sent = 0;
Mat Martineau105bdf92012-04-27 16:50:48 -07003037 chan->last_acked_seq = 0;
3038 chan->sdu = NULL;
3039 chan->sdu_last_frag = NULL;
3040 chan->sdu_len = 0;
3041
Mat Martineaud34c34f2012-05-14 14:49:27 -07003042 skb_queue_head_init(&chan->tx_q);
3043
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003044 chan->local_amp_id = AMP_ID_BREDR;
3045 chan->move_id = AMP_ID_BREDR;
Mat Martineau08333282012-10-23 15:24:06 -07003046 chan->move_state = L2CAP_MOVE_STABLE;
3047 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3048
Mat Martineau105bdf92012-04-27 16:50:48 -07003049 if (chan->mode != L2CAP_MODE_ERTM)
3050 return 0;
3051
3052 chan->rx_state = L2CAP_RX_STATE_RECV;
3053 chan->tx_state = L2CAP_TX_STATE_XMIT;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003054
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003055 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3056 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3057 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003058
Gustavo F. Padovanf1c67752011-03-25 20:36:10 -03003059 skb_queue_head_init(&chan->srej_q);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03003060
Mat Martineau3c588192012-04-11 10:48:42 -07003061 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3062 if (err < 0)
3063 return err;
3064
Mat Martineau9dc9aff2012-05-17 16:20:14 -07003065 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3066 if (err < 0)
3067 l2cap_seq_list_free(&chan->srej_list);
3068
3069 return err;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003070}
3071
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003072static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3073{
3074 switch (mode) {
3075 case L2CAP_MODE_STREAMING:
3076 case L2CAP_MODE_ERTM:
3077 if (l2cap_mode_supported(mode, remote_feat_mask))
3078 return mode;
3079 /* fall through */
3080 default:
3081 return L2CAP_MODE_BASIC;
3082 }
3083}
3084
Marcel Holtmann848566b2013-10-01 22:59:22 -07003085static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003086{
Marcel Holtmann848566b2013-10-01 22:59:22 -07003087 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003088}
3089
Marcel Holtmann848566b2013-10-01 22:59:22 -07003090static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003091{
Marcel Holtmann848566b2013-10-01 22:59:22 -07003092 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003093}
3094
Mat Martineau36c86c82012-10-23 15:24:20 -07003095static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3096 struct l2cap_conf_rfc *rfc)
3097{
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003098 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
Mat Martineau36c86c82012-10-23 15:24:20 -07003099 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3100
3101 /* Class 1 devices must have ERTM timeouts
3102 * exceeding the Link Supervision Timeout. The
3103 * default Link Supervision Timeout for AMP
3104 * controllers is 10 seconds.
3105 *
3106 * Class 1 devices use 0xffffffff for their
3107 * best-effort flush timeout, so the clamping logic
3108 * will result in a timeout that meets the above
3109 * requirement. ERTM timeouts are 16-bit values, so
3110 * the maximum timeout is 65.535 seconds.
3111 */
3112
3113 /* Convert timeout to milliseconds and round */
3114 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3115
3116 /* This is the recommended formula for class 2 devices
3117 * that start ERTM timers when packets are sent to the
3118 * controller.
3119 */
3120 ertm_to = 3 * ertm_to + 500;
3121
3122 if (ertm_to > 0xffff)
3123 ertm_to = 0xffff;
3124
3125 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3126 rfc->monitor_timeout = rfc->retrans_timeout;
3127 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003128 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3129 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
Mat Martineau36c86c82012-10-23 15:24:20 -07003130 }
3131}
3132
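/* Select the tx window: use the extended control field when the
 * requested window exceeds the default and extended windows are
 * supported, otherwise clamp to the standard maximum.
 */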
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003133static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3134{
3135 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
Marcel Holtmann848566b2013-10-01 22:59:22 -07003136 __l2cap_ews_supported(chan->conn)) {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003137 /* use extended control field */
3138 set_bit(FLAG_EXT_CTRL, &chan->flags);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003139 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3140 } else {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003141 chan->tx_win = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003142 L2CAP_DEFAULT_TX_WINDOW);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003143 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3144 }
Mat Martineauc20f8e32012-07-10 05:47:07 -07003145 chan->ack_win = chan->tx_win;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003146}
3147
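/* Build the options of an outgoing configuration request (MTU, RFC,
 * FCS, EFS and extended window size) according to the channel mode and
 * the features supported by the remote side.
 */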
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003148static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003150 struct l2cap_conf_req *req = data;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003151 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152 void *ptr = req->data;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003153 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003155 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003157 if (chan->num_conf_req || chan->num_conf_rsp)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003158 goto done;
3159
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003160 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003161 case L2CAP_MODE_STREAMING:
3162 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003163 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003164 break;
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003165
Marcel Holtmann848566b2013-10-01 22:59:22 -07003166 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003167 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3168
Gustavo F. Padovan2ba13ed2010-06-09 16:39:05 -03003169 /* fall through */
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003170 default:
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003171 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003172 break;
3173 }
3174
3175done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003176 if (chan->imtu != L2CAP_DEFAULT_MTU)
3177 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovan7990681c2011-01-24 16:01:43 -02003178
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003179 switch (chan->mode) {
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003180 case L2CAP_MODE_BASIC:
Marcel Holtmann6fea7ad2014-07-09 11:53:35 +02003181 if (disable_ertm)
3182 break;
3183
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003184 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003185 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003186 break;
3187
Gustavo F. Padovan62547752010-06-08 20:05:31 -03003188 rfc.mode = L2CAP_MODE_BASIC;
3189 rfc.txwin_size = 0;
3190 rfc.max_transmit = 0;
3191 rfc.retrans_timeout = 0;
3192 rfc.monitor_timeout = 0;
3193 rfc.max_pdu_size = 0;
3194
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003195 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003196 (unsigned long) &rfc);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003197 break;
3198
3199 case L2CAP_MODE_ERTM:
3200 rfc.mode = L2CAP_MODE_ERTM;
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003201 rfc.max_transmit = chan->max_tx;
Mat Martineau36c86c82012-10-23 15:24:20 -07003202
3203 __l2cap_set_ertm_timeouts(chan, &rfc);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003204
3205 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003206 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3207 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003208 rfc.max_pdu_size = cpu_to_le16(size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003209
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003210 l2cap_txwin_setup(chan);
3211
3212 rfc.txwin_size = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003213 L2CAP_DEFAULT_TX_WINDOW);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003214
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003215 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003216 (unsigned long) &rfc);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003217
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003218 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3219 l2cap_add_opt_efs(&ptr, chan);
3220
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003221 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3222 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003223 chan->tx_win);
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003224
3225 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3226 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003227 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003228 chan->fcs = L2CAP_FCS_NONE;
3229 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3230 chan->fcs);
3231 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003232 break;
3233
3234 case L2CAP_MODE_STREAMING:
Mat Martineau273759e2012-05-17 20:53:53 -07003235 l2cap_txwin_setup(chan);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003236 rfc.mode = L2CAP_MODE_STREAMING;
3237 rfc.txwin_size = 0;
3238 rfc.max_transmit = 0;
3239 rfc.retrans_timeout = 0;
3240 rfc.monitor_timeout = 0;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003241
3242 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003243 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3244 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003245 rfc.max_pdu_size = cpu_to_le16(size);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003246
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003247 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003248 (unsigned long) &rfc);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003249
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003250 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3251 l2cap_add_opt_efs(&ptr, chan);
3252
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003253 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3254 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003255 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003256 chan->fcs = L2CAP_FCS_NONE;
3257 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3258 chan->fcs);
3259 }
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003260 break;
3261 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003263 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003264 req->flags = cpu_to_le16(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265
3266 return ptr - data;
3267}
3268
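/* Parse the peer's configuration request stored in chan->conf_req and
 * build the corresponding response, accepting, adjusting or rejecting
 * the MTU, RFC, FCS, EFS and extended window size options.
 */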
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003269static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003270{
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003271 struct l2cap_conf_rsp *rsp = data;
3272 void *ptr = rsp->data;
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003273 void *req = chan->conf_req;
3274 int len = chan->conf_len;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003275 int type, hint, olen;
3276 unsigned long val;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003277 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003278 struct l2cap_conf_efs efs;
3279 u8 remote_efs = 0;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003280 u16 mtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003281 u16 result = L2CAP_CONF_SUCCESS;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003282 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003283
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003284 BT_DBG("chan %p", chan);
Marcel Holtmann820ae1b2006-11-18 22:15:00 +01003285
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003286 while (len >= L2CAP_CONF_OPT_SIZE) {
3287 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003288
Gustavo F. Padovan589d2742009-04-20 01:31:07 -03003289 hint = type & L2CAP_CONF_HINT;
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07003290 type &= L2CAP_CONF_MASK;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003291
3292 switch (type) {
3293 case L2CAP_CONF_MTU:
Marcel Holtmann861d6882007-10-20 13:37:06 +02003294 mtu = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003295 break;
3296
3297 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003298 chan->flush_to = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003299 break;
3300
3301 case L2CAP_CONF_QOS:
3302 break;
3303
Marcel Holtmann6464f352007-10-20 13:39:51 +02003304 case L2CAP_CONF_RFC:
3305 if (olen == sizeof(rfc))
3306 memcpy(&rfc, (void *) val, olen);
3307 break;
3308
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003309 case L2CAP_CONF_FCS:
3310 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003311 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003312 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003313
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003314 case L2CAP_CONF_EFS:
3315 remote_efs = 1;
3316 if (olen == sizeof(efs))
3317 memcpy(&efs, (void *) val, olen);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003318 break;
3319
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003320 case L2CAP_CONF_EWS:
Marcel Holtmann848566b2013-10-01 22:59:22 -07003321 if (!chan->conn->hs_enabled)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003322 return -ECONNREFUSED;
3323
3324 set_bit(FLAG_EXT_CTRL, &chan->flags);
3325 set_bit(CONF_EWS_RECV, &chan->conf_state);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003326 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003327 chan->remote_tx_win = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003328 break;
3329
3330 default:
3331 if (hint)
3332 break;
3333
3334 result = L2CAP_CONF_UNKNOWN;
3335 *((u8 *) ptr++) = type;
3336 break;
3337 }
3338 }
3339
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003340 if (chan->num_conf_rsp || chan->num_conf_req > 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003341 goto done;
3342
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003343 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003344 case L2CAP_MODE_STREAMING:
3345 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003346 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003347 chan->mode = l2cap_select_mode(rfc.mode,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003348 chan->conn->feat_mask);
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003349 break;
3350 }
3351
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003352 if (remote_efs) {
Marcel Holtmann848566b2013-10-01 22:59:22 -07003353 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003354 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3355 else
3356 return -ECONNREFUSED;
3357 }
3358
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003359 if (chan->mode != rfc.mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003360 return -ECONNREFUSED;
Gustavo F. Padovan742e5192010-06-08 19:09:48 -03003361
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003362 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003363 }
3364
3365done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003366 if (chan->mode != rfc.mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003367 result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003368 rfc.mode = chan->mode;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003369
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003370 if (chan->num_conf_rsp == 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003371 return -ECONNREFUSED;
3372
Gustavo Padovan2d792812012-10-06 10:07:01 +01003373 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3374 (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003375 }
3376
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003377 if (result == L2CAP_CONF_SUCCESS) {
3378 /* Configure output options and let the other side know
3379 * which ones we don't like. */
3380
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003381 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3382 result = L2CAP_CONF_UNACCEPT;
3383 else {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003384 chan->omtu = mtu;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003385 set_bit(CONF_MTU_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003386 }
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003388
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003389 if (remote_efs) {
3390 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003391 efs.stype != L2CAP_SERV_NOTRAFIC &&
3392 efs.stype != chan->local_stype) {
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003393
3394 result = L2CAP_CONF_UNACCEPT;
3395
3396 if (chan->num_conf_req >= 1)
3397 return -ECONNREFUSED;
3398
3399 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003400 sizeof(efs),
3401 (unsigned long) &efs);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003402 } else {
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003403 /* Send PENDING Conf Rsp */
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003404 result = L2CAP_CONF_PENDING;
3405 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003406 }
3407 }
3408
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003409 switch (rfc.mode) {
3410 case L2CAP_MODE_BASIC:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003411 chan->fcs = L2CAP_FCS_NONE;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003412 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003413 break;
3414
3415 case L2CAP_MODE_ERTM:
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003416 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3417 chan->remote_tx_win = rfc.txwin_size;
3418 else
3419 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3420
Gustavo F. Padovan2c03a7a2011-03-25 20:15:28 -03003421 chan->remote_max_tx = rfc.max_transmit;
Mat Martineau86b1b262010-08-05 15:54:22 -07003422
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003423 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003424 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3425 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003426 rfc.max_pdu_size = cpu_to_le16(size);
3427 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003428
Mat Martineau36c86c82012-10-23 15:24:20 -07003429 __l2cap_set_ertm_timeouts(chan, &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003430
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003431 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003432
3433 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003434 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003435
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003436 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3437 chan->remote_id = efs.id;
3438 chan->remote_stype = efs.stype;
3439 chan->remote_msdu = le16_to_cpu(efs.msdu);
3440 chan->remote_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003441 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003442 chan->remote_acc_lat =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003443 le32_to_cpu(efs.acc_lat);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003444 chan->remote_sdu_itime =
3445 le32_to_cpu(efs.sdu_itime);
3446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003447 sizeof(efs),
3448 (unsigned long) &efs);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003449 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003450 break;
3451
3452 case L2CAP_MODE_STREAMING:
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003453 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003454 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3455 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003456 rfc.max_pdu_size = cpu_to_le16(size);
3457 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003458
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003459 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003460
Gustavo Padovan2d792812012-10-06 10:07:01 +01003461 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3462 (unsigned long) &rfc);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003463
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003464 break;
3465
3466 default:
Marcel Holtmann6464f352007-10-20 13:39:51 +02003467 result = L2CAP_CONF_UNACCEPT;
3468
3469 memset(&rfc, 0, sizeof(rfc));
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003470 rfc.mode = chan->mode;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003471 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003472
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003473 if (result == L2CAP_CONF_SUCCESS)
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003474 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003475 }
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003476 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003477 rsp->result = cpu_to_le16(result);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003478 rsp->flags = cpu_to_le16(0);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003479
3480 return ptr - data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003481}
3482
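/* Parse a configuration response from the peer and build the follow-up
 * request, adopting the negotiated MTU, flush timeout, RFC, EFS and
 * window size values.
 */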
Gustavo Padovan2d792812012-10-06 10:07:01 +01003483static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3484 void *data, u16 *result)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003485{
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003486 struct l2cap_conf_req *req = data;
3487 void *ptr = req->data;
3488 int type, olen;
3489 unsigned long val;
Mat Martineau36e999a2011-12-08 17:23:21 -08003490 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003491 struct l2cap_conf_efs efs;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003492
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003493 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003494
3495 while (len >= L2CAP_CONF_OPT_SIZE) {
3496 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3497
3498 switch (type) {
3499 case L2CAP_CONF_MTU:
3500 if (val < L2CAP_DEFAULT_MIN_MTU) {
3501 *result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003502 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003503 } else
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003504 chan->imtu = val;
3505 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003506 break;
3507
3508 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003509 chan->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003510 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003511 2, chan->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003512 break;
3513
3514 case L2CAP_CONF_RFC:
3515 if (olen == sizeof(rfc))
3516 memcpy(&rfc, (void *)val, olen);
3517
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003518 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003519 rfc.mode != chan->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003520 return -ECONNREFUSED;
3521
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003522 chan->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003523
3524 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003525 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003526 break;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003527
3528 case L2CAP_CONF_EWS:
Mat Martineauc20f8e32012-07-10 05:47:07 -07003529 chan->ack_win = min_t(u16, val, chan->ack_win);
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Mat Martineauc20f8e32012-07-10 05:47:07 -07003531 chan->tx_win);
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003532 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003533
3534 case L2CAP_CONF_EFS:
3535 if (olen == sizeof(efs))
3536 memcpy(&efs, (void *)val, olen);
3537
3538 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003539 efs.stype != L2CAP_SERV_NOTRAFIC &&
3540 efs.stype != chan->local_stype)
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003541 return -ECONNREFUSED;
3542
Gustavo Padovan2d792812012-10-06 10:07:01 +01003543 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3544 (unsigned long) &efs);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003545 break;
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003546
3547 case L2CAP_CONF_FCS:
3548 if (*result == L2CAP_CONF_PENDING)
3549 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003550 set_bit(CONF_RECV_NO_FCS,
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003551 &chan->conf_state);
3552 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003553 }
3554 }
3555
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003556 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003557 return -ECONNREFUSED;
3558
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003559 chan->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003560
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003561 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003562 switch (rfc.mode) {
3563 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003564 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3565 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3566 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003567 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3568 chan->ack_win = min_t(u16, chan->ack_win,
3569 rfc.txwin_size);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003570
3571 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3572 chan->local_msdu = le16_to_cpu(efs.msdu);
3573 chan->local_sdu_itime =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003574 le32_to_cpu(efs.sdu_itime);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003575 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3576 chan->local_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003577 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003578 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003579 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003580
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003581 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003582 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003583 }
3584 }
3585
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003586 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003587 req->flags = cpu_to_le16(0);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003588
3589 return ptr - data;
3590}
3591
Gustavo Padovan2d792812012-10-06 10:07:01 +01003592static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3593 u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594{
3595 struct l2cap_conf_rsp *rsp = data;
3596 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003598 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003599
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003600 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003601 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003602 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603
3604 return ptr - data;
3605}
3606
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003607void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3608{
3609 struct l2cap_le_conn_rsp rsp;
3610 struct l2cap_conn *conn = chan->conn;
3611
3612 BT_DBG("chan %p", chan);
3613
3614 rsp.dcid = cpu_to_le16(chan->scid);
3615 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02003616 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03003617 rsp.credits = cpu_to_le16(chan->rx_credits);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003618 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003619
3620 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3621 &rsp);
3622}
3623
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003624void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003625{
3626 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003627 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003628 u8 buf[128];
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003629 u8 rsp_code;
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003630
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003631 rsp.scid = cpu_to_le16(chan->dcid);
3632 rsp.dcid = cpu_to_le16(chan->scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003633 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3634 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003635
3636 if (chan->hs_hcon)
3637 rsp_code = L2CAP_CREATE_CHAN_RSP;
3638 else
3639 rsp_code = L2CAP_CONN_RSP;
3640
3641 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3642
3643 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003644
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003645 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003646 return;
3647
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003648 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003649 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003650 chan->num_conf_req++;
3651}
3652
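/* Extract the RFC and extended window size options from a successful
 * configuration response, falling back to sane defaults when the
 * remote device omitted them.
 */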
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003653static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003654{
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003655 int type, olen;
3656 unsigned long val;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003657 /* Use sane default values in case a misbehaving remote device
3658 * did not send an RFC or extended window size option.
3659 */
3660 u16 txwin_ext = chan->ack_win;
3661 struct l2cap_conf_rfc rfc = {
3662 .mode = chan->mode,
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003663 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3664 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
Mat Martineauc20f8e32012-07-10 05:47:07 -07003665 .max_pdu_size = cpu_to_le16(chan->imtu),
3666 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3667 };
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003668
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003669 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003670
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003671 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003672 return;
3673
3674 while (len >= L2CAP_CONF_OPT_SIZE) {
3675 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3676
Mat Martineauc20f8e32012-07-10 05:47:07 -07003677 switch (type) {
3678 case L2CAP_CONF_RFC:
3679 if (olen == sizeof(rfc))
3680 memcpy(&rfc, (void *)val, olen);
Szymon Janc8f321f82012-06-08 11:33:33 +02003681 break;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003682 case L2CAP_CONF_EWS:
3683 txwin_ext = val;
3684 break;
3685 }
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003686 }
3687
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003688 switch (rfc.mode) {
3689 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003690 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3691 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003692 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3693 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3694 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3695 else
3696 chan->ack_win = min_t(u16, chan->ack_win,
3697 rfc.txwin_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003698 break;
3699 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003700 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003701 }
3702}
3703
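/* Handle a Command Reject for an outstanding information request: stop
 * the info timer and continue connection setup without the optional
 * feature information.
 */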
Gustavo Padovan2d792812012-10-06 10:07:01 +01003704static inline int l2cap_command_rej(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003705 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3706 u8 *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003707{
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003708 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003709
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003710 if (cmd_len < sizeof(*rej))
3711 return -EPROTO;
3712
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003713 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003714 return 0;
3715
3716 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003717 cmd->ident == conn->info_ident) {
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02003718 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003719
3720 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003721 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003722
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003723 l2cap_conn_start(conn);
3724 }
3725
3726 return 0;
3727}
3728
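/* Handle an incoming Connection Request: look up a listening channel
 * for the PSM, perform security and duplicate-CID checks, create the
 * new channel and send back a Connection Response.
 */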
Mat Martineau17009152012-10-23 15:24:07 -07003729static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3730 struct l2cap_cmd_hdr *cmd,
3731 u8 *data, u8 rsp_code, u8 amp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003732{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003733 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3734 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003735 struct l2cap_chan *chan = NULL, *pchan;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003736 int result, status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737
3738 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003739 __le16 psm = req->psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740
Andrei Emeltchenko097db762012-03-09 14:16:17 +02003741 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003742
3743	/* Check if we have a socket listening on this PSM */
Marcel Holtmann6f59b902013-10-13 05:24:01 -07003744 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03003745 &conn->hcon->dst, ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003746 if (!pchan) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003747 result = L2CAP_CR_BAD_PSM;
3748 goto sendresp;
3749 }
3750
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003751 mutex_lock(&conn->chan_lock);
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003752 l2cap_chan_lock(pchan);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00003753
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003754 /* Check if the ACL is secure enough (if not SDP) */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003755 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003756 !hci_conn_check_link_mode(conn->hcon)) {
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02003757 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003758 result = L2CAP_CR_SEC_BLOCK;
3759 goto response;
3760 }
3761
Linus Torvalds1da177e2005-04-16 15:20:36 -07003762 result = L2CAP_CR_NO_MEM;
3763
Gustavo Padovan2dfa1002012-05-27 22:27:58 -03003764 /* Check if we already have channel with that dcid */
3765 if (__l2cap_get_chan_by_dcid(conn, scid))
3766 goto response;
3767
Gustavo Padovan80b98022012-05-27 22:27:51 -03003768 chan = pchan->ops->new_connection(pchan);
Gustavo F. Padovan80808e42011-05-16 17:24:37 -03003769 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003770 goto response;
3771
Syam Sidhardhan330b6c12013-08-06 01:59:12 +09003772	/* For certain devices (e.g. a HID mouse), support for authentication,
3773	 * pairing and bonding is optional. For such devices, in order to avoid
3774	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3775	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3776	 */
3777 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3778
Marcel Holtmann7eafc592013-10-13 08:12:47 -07003779 bacpy(&chan->src, &conn->hcon->src);
3780 bacpy(&chan->dst, &conn->hcon->dst);
Marcel Holtmann4f1654e2013-10-13 08:50:41 -07003781 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3782 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003783 chan->psm = psm;
3784 chan->dcid = scid;
Mat Martineau17009152012-10-23 15:24:07 -07003785 chan->local_amp_id = amp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003786
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003787 __l2cap_chan_add(conn, chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003788
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003789 dcid = chan->scid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790
Gustavo Padovan8d836d72013-10-15 19:24:47 -03003791 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003792
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003793 chan->ident = cmd->ident;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794
Marcel Holtmann984947d2009-02-06 23:35:19 +01003795 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03003796 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07003797 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003798 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003799 result = L2CAP_CR_PEND;
3800 status = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08003801 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003802 } else {
Mat Martineau17009152012-10-23 15:24:07 -07003803 /* Force pending result for AMP controllers.
3804 * The connection will succeed after the
3805 * physical link is up.
3806 */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003807 if (amp_id == AMP_ID_BREDR) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003808 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau17009152012-10-23 15:24:07 -07003809 result = L2CAP_CR_SUCCESS;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003810 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003811 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003812 result = L2CAP_CR_PEND;
Mat Martineau17009152012-10-23 15:24:07 -07003813 }
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003814 status = L2CAP_CS_NO_INFO;
3815 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003816 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003817 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003818 result = L2CAP_CR_PEND;
3819 status = L2CAP_CS_AUTHEN_PEND;
3820 }
3821 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003822 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003823 result = L2CAP_CR_PEND;
3824 status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825 }
3826
Linus Torvalds1da177e2005-04-16 15:20:36 -07003827response:
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003828 l2cap_chan_unlock(pchan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003829 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03003830 l2cap_chan_put(pchan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831
3832sendresp:
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003833 rsp.scid = cpu_to_le16(scid);
3834 rsp.dcid = cpu_to_le16(dcid);
3835 rsp.result = cpu_to_le16(result);
3836 rsp.status = cpu_to_le16(status);
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003837 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003838
3839 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3840 struct l2cap_info_req info;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003841 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003842
3843 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3844 conn->info_ident = l2cap_get_ident(conn);
3845
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08003846 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003847
Gustavo Padovan2d792812012-10-06 10:07:01 +01003848 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3849 sizeof(info), &info);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003850 }
3851
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003852 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003853 result == L2CAP_CR_SUCCESS) {
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003854 u8 buf[128];
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003855 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003856 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003857 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003858 chan->num_conf_req++;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003859 }
Mat Martineau17009152012-10-23 15:24:07 -07003860
3861 return chan;
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003862}
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003863
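/* Handle an incoming Connection Request: mark the ACL as connected for
 * mgmt if needed, then let l2cap_connect() do the actual channel setup
 * and send the Connection Response.
 */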
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003864static int l2cap_connect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003865 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003866{
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303867 struct hci_dev *hdev = conn->hcon->hdev;
3868 struct hci_conn *hcon = conn->hcon;
3869
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003870 if (cmd_len < sizeof(struct l2cap_conn_req))
3871 return -EPROTO;
3872
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303873 hci_dev_lock(hdev);
3874 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3875 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
Alfonso Acosta48ec92f2014-10-07 08:44:10 +00003876 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303877 hci_dev_unlock(hdev);
3878
Gustavo Padovan300229f2012-10-12 19:40:40 +08003879 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880 return 0;
3881}
3882
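/* Handle a Connection Response or Create Channel Response: find the
 * matching channel by source CID or command ident, then either start
 * configuration (success), keep waiting (pending) or tear the channel
 * down on any other result.
 */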
Mat Martineau5909cf32012-10-23 15:24:08 -07003883static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003884 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3885 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003886{
3887 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3888 u16 scid, dcid, result, status;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003889 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890 u8 req[128];
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003891 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003892
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003893 if (cmd_len < sizeof(*rsp))
3894 return -EPROTO;
3895
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896 scid = __le16_to_cpu(rsp->scid);
3897 dcid = __le16_to_cpu(rsp->dcid);
3898 result = __le16_to_cpu(rsp->result);
3899 status = __le16_to_cpu(rsp->status);
3900
Andrei Emeltchenko1b009c92012-02-21 12:54:54 +02003901 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01003902 dcid, scid, result, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003903
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003904 mutex_lock(&conn->chan_lock);
3905
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906 if (scid) {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003907 chan = __l2cap_get_chan_by_scid(conn, scid);
3908 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003909 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003910 goto unlock;
3911 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003912 } else {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003913 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3914 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003915 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003916 goto unlock;
3917 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003918 }
3919
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003920 err = 0;
3921
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003922 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003923
Linus Torvalds1da177e2005-04-16 15:20:36 -07003924 switch (result) {
3925 case L2CAP_CR_SUCCESS:
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03003926 l2cap_state_change(chan, BT_CONFIG);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003927 chan->ident = 0;
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003928 chan->dcid = dcid;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003929 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01003930
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003931 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003932 break;
3933
Linus Torvalds1da177e2005-04-16 15:20:36 -07003934 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003935 l2cap_build_conf_req(chan, req), req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003936 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937 break;
3938
3939 case L2CAP_CR_PEND:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003940 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 break;
3942
3943 default:
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003944 l2cap_chan_del(chan, ECONNREFUSED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945 break;
3946 }
3947
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003948 l2cap_chan_unlock(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003949
3950unlock:
3951 mutex_unlock(&conn->chan_lock);
3952
3953 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003954}
3955
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003956static inline void set_default_fcs(struct l2cap_chan *chan)
Mat Martineau8c462b62010-08-24 15:35:42 -07003957{
3958 /* FCS is enabled only in ERTM or streaming mode, if one or both
3959 * sides request it.
3960 */
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003961 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003962 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003963 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003964 chan->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07003965}
3966
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03003967static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3968 u8 ident, u16 flags)
3969{
3970 struct l2cap_conn *conn = chan->conn;
3971
3972 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3973 flags);
3974
3975 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3976 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3977
3978 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3979 l2cap_build_conf_rsp(chan, data,
3980 L2CAP_CONF_SUCCESS, flags), data);
3981}
3982
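/* Send a Command Reject with reason "invalid CID", carrying the
 * offending source and destination CIDs.
 */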
Johan Hedberg662d6522013-10-16 11:20:47 +03003983static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3984 u16 scid, u16 dcid)
3985{
3986 struct l2cap_cmd_rej_cid rej;
3987
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003988 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
Johan Hedberg662d6522013-10-16 11:20:47 +03003989 rej.scid = __cpu_to_le16(scid);
3990 rej.dcid = __cpu_to_le16(dcid);
3991
3992 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3993}
3994
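/* Handle an incoming Configuration Request: accumulate options across
 * continuation packets, parse them, send a Configuration Response and,
 * once both directions are configured, initialise ERTM/streaming state
 * and mark the channel ready.
 */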
Gustavo Padovan2d792812012-10-06 10:07:01 +01003995static inline int l2cap_config_req(struct l2cap_conn *conn,
3996 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3997 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003998{
3999 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4000 u16 dcid, flags;
4001 u8 rsp[64];
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004002 struct l2cap_chan *chan;
Mat Martineau3c588192012-04-11 10:48:42 -07004003 int len, err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004004
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004005 if (cmd_len < sizeof(*req))
4006 return -EPROTO;
4007
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008 dcid = __le16_to_cpu(req->dcid);
4009 flags = __le16_to_cpu(req->flags);
4010
4011 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4012
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004013 chan = l2cap_get_chan_by_scid(conn, dcid);
Johan Hedberg662d6522013-10-16 11:20:47 +03004014 if (!chan) {
4015 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4016 return 0;
4017 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004018
David S. Miller033b1142011-07-21 13:38:42 -07004019 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
Johan Hedberg662d6522013-10-16 11:20:47 +03004020 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4021 chan->dcid);
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004022 goto unlock;
Gustavo F. Padovandf6bd742010-06-14 02:26:15 -03004023 }
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004024
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004025 /* Reject if config buffer is too small. */
Al Viro88219a02007-07-29 00:17:25 -07004026 len = cmd_len - sizeof(*req);
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004027 if (chan->conf_len + len > sizeof(chan->conf_req)) {
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004028 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004029 l2cap_build_conf_rsp(chan, rsp,
4030 L2CAP_CONF_REJECT, flags), rsp);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004031 goto unlock;
4032 }
4033
4034 /* Store config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004035 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4036 chan->conf_len += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004037
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004038 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004039 /* Incomplete config. Send empty response. */
4040 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004041 l2cap_build_conf_rsp(chan, rsp,
4042 L2CAP_CONF_SUCCESS, flags), rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043 goto unlock;
4044 }
4045
4046 /* Complete config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004047 len = l2cap_parse_conf_req(chan, rsp);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004048 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004049 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004050 goto unlock;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004051 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052
Mat Martineau1500109b2012-10-23 15:24:15 -07004053 chan->ident = cmd->ident;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004054 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004055 chan->num_conf_rsp++;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004056
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004057 /* Reset config buffer. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004058 chan->conf_len = 0;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004059
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004060 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
Marcel Holtmann876d9482007-10-20 13:35:42 +02004061 goto unlock;
4062
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004063 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004064 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004065
Mat Martineau105bdf92012-04-27 16:50:48 -07004066 if (chan->mode == L2CAP_MODE_ERTM ||
4067 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004068 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004069
Mat Martineau3c588192012-04-11 10:48:42 -07004070 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004071 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004072 else
4073 l2cap_chan_ready(chan);
4074
Marcel Holtmann876d9482007-10-20 13:35:42 +02004075 goto unlock;
4076 }
4077
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004078 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004079 u8 buf[64];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004080 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004081 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004082 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083 }
4084
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004085	/* Got Conf Rsp PENDING from the remote side and assume we sent
4086	 * Conf Rsp PENDING in the code above */
4087 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03004088 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004089
4090 /* check compatibility */
4091
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004092 /* Send rsp for BR/EDR channel */
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004093 if (!chan->hs_hcon)
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004094 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4095 else
4096 chan->ident = cmd->ident;
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004097 }
4098
Linus Torvalds1da177e2005-04-16 15:20:36 -07004099unlock:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004100 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004101 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102}
4103
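/* Handle an incoming Configuration Response: on success record the
 * remote RFC options, on "unaccepted" re-negotiate with a new request,
 * on "pending" wait for the final response, and disconnect on anything
 * else. Completes channel setup once both directions are done.
 */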
Gustavo Padovan2d792812012-10-06 10:07:01 +01004104static inline int l2cap_config_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004105 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4106 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004107{
4108 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4109 u16 scid, flags, result;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004110 struct l2cap_chan *chan;
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004111 int len = cmd_len - sizeof(*rsp);
Mat Martineau3c588192012-04-11 10:48:42 -07004112 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004113
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004114 if (cmd_len < sizeof(*rsp))
4115 return -EPROTO;
4116
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117 scid = __le16_to_cpu(rsp->scid);
4118 flags = __le16_to_cpu(rsp->flags);
4119 result = __le16_to_cpu(rsp->result);
4120
Andrei Emeltchenko61386cb2012-03-12 12:13:07 +02004121 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4122 result, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004123
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004124 chan = l2cap_get_chan_by_scid(conn, scid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004125 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004126 return 0;
4127
4128 switch (result) {
4129 case L2CAP_CONF_SUCCESS:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004130 l2cap_conf_rfc_get(chan, rsp->data, len);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004131 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132 break;
4133
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004134 case L2CAP_CONF_PENDING:
4135 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4136
4137 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4138 char buf[64];
4139
4140 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004141 buf, &result);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004142 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004143 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004144 goto done;
4145 }
4146
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004147 if (!chan->hs_hcon) {
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004148 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4149 0);
Andrei Emeltchenko5ce66b52012-10-31 15:46:30 +02004150 } else {
4151 if (l2cap_check_efs(chan)) {
4152 amp_create_logical_link(chan);
4153 chan->ident = cmd->ident;
4154 }
4155 }
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004156 }
4157 goto done;
4158
Linus Torvalds1da177e2005-04-16 15:20:36 -07004159 case L2CAP_CONF_UNACCEPT:
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004160 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004161 char req[64];
4162
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004163 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004164 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004165 goto done;
4166 }
4167
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004168 /* throw out any old stored conf requests */
4169 result = L2CAP_CONF_SUCCESS;
Gustavo F. Padovanb4450032011-04-12 18:15:09 -03004170 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004171 req, &result);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004172 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004173 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004174 goto done;
4175 }
4176
4177 l2cap_send_cmd(conn, l2cap_get_ident(conn),
Gustavo Padovan2d792812012-10-06 10:07:01 +01004178 L2CAP_CONF_REQ, len, req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004179 chan->num_conf_req++;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004180 if (result != L2CAP_CONF_SUCCESS)
4181 goto done;
4182 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004183 }
4184
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004185 default:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004186 l2cap_chan_set_err(chan, ECONNRESET);
Andrei Emeltchenko2e0052e2012-02-21 12:54:58 +02004187
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08004188 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004189 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004190 goto done;
4191 }
4192
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004193 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 goto done;
4195
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004196 set_bit(CONF_INPUT_DONE, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004198 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004199 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004200
Mat Martineau105bdf92012-04-27 16:50:48 -07004201 if (chan->mode == L2CAP_MODE_ERTM ||
4202 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004203 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004204
Mat Martineau3c588192012-04-11 10:48:42 -07004205 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004206 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004207 else
4208 l2cap_chan_ready(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004209 }
4210
4211done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004212 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004213 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214}
4215
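/* Handle an incoming Disconnection Request: acknowledge it with a
 * Disconnection Response, then shut down and free the channel.
 */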
Gustavo Padovan2d792812012-10-06 10:07:01 +01004216static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004217 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4218 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219{
4220 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4221 struct l2cap_disconn_rsp rsp;
4222 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004223 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004224
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004225 if (cmd_len != sizeof(*req))
4226 return -EPROTO;
4227
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228 scid = __le16_to_cpu(req->scid);
4229 dcid = __le16_to_cpu(req->dcid);
4230
4231 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4232
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004233 mutex_lock(&conn->chan_lock);
4234
4235 chan = __l2cap_get_chan_by_scid(conn, dcid);
4236 if (!chan) {
4237 mutex_unlock(&conn->chan_lock);
Johan Hedberg662d6522013-10-16 11:20:47 +03004238 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4239 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004240 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004241
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004242 l2cap_chan_lock(chan);
4243
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03004244 rsp.dcid = cpu_to_le16(chan->scid);
4245 rsp.scid = cpu_to_le16(chan->dcid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004246 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4247
Gustavo Padovan5ec1bbe2013-10-15 19:24:48 -03004248 chan->ops->set_shutdown(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249
Mat Martineau61d6ef32012-04-27 16:50:50 -07004250 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004251 l2cap_chan_del(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004252
4253 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004254
Gustavo Padovan80b98022012-05-27 22:27:51 -03004255 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004256 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004257
4258 mutex_unlock(&conn->chan_lock);
4259
Linus Torvalds1da177e2005-04-16 15:20:36 -07004260 return 0;
4261}
4262
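/* Handle an incoming Disconnection Response: the remote has confirmed
 * our disconnect, so remove and close the channel.
 */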
Gustavo Padovan2d792812012-10-06 10:07:01 +01004263static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004264 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4265 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004266{
4267 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4268 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004269 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004271 if (cmd_len != sizeof(*rsp))
4272 return -EPROTO;
4273
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274 scid = __le16_to_cpu(rsp->scid);
4275 dcid = __le16_to_cpu(rsp->dcid);
4276
4277 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4278
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004279 mutex_lock(&conn->chan_lock);
4280
4281 chan = __l2cap_get_chan_by_scid(conn, scid);
4282 if (!chan) {
4283 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004284 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004285 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004286
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004287 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004288
Mat Martineau61d6ef32012-04-27 16:50:50 -07004289 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004290 l2cap_chan_del(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004291
4292 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004293
Gustavo Padovan80b98022012-05-27 22:27:51 -03004294 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004295 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004296
4297 mutex_unlock(&conn->chan_lock);
4298
Linus Torvalds1da177e2005-04-16 15:20:36 -07004299 return 0;
4300}
4301
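/* Handle an incoming Information Request: reply with our extended
 * feature mask, our fixed channel map, or "not supported" for any
 * other information type.
 */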
Gustavo Padovan2d792812012-10-06 10:07:01 +01004302static inline int l2cap_information_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004303 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4304 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004305{
4306 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307 u16 type;
4308
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004309 if (cmd_len != sizeof(*req))
4310 return -EPROTO;
4311
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312 type = __le16_to_cpu(req->type);
4313
4314 BT_DBG("type 0x%4.4x", type);
4315
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004316 if (type == L2CAP_IT_FEAT_MASK) {
4317 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004318 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004319 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004320 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4321 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004322 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004323 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Gustavo Padovan2d792812012-10-06 10:07:01 +01004324 | L2CAP_FEAT_FCS;
Marcel Holtmann848566b2013-10-01 22:59:22 -07004325 if (conn->hs_enabled)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03004326 feat_mask |= L2CAP_FEAT_EXT_FLOW
Gustavo Padovan2d792812012-10-06 10:07:01 +01004327 | L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenkoa5fd6f32011-09-16 16:26:32 +03004328
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004329 put_unaligned_le32(feat_mask, rsp->data);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004330 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4331 buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004332 } else if (type == L2CAP_IT_FIXED_CHAN) {
4333 u8 buf[12];
4334 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Mat Martineau50a147c2011-11-02 16:18:34 -07004335
Marcel Holtmann848566b2013-10-01 22:59:22 -07004336 if (conn->hs_enabled)
Mat Martineau50a147c2011-11-02 16:18:34 -07004337 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4338 else
4339 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4340
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004341 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4342 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Andrei Emeltchenkoc6337ea2011-10-20 17:02:44 +03004343 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
Gustavo Padovan2d792812012-10-06 10:07:01 +01004344 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4345 buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004346 } else {
4347 struct l2cap_info_rsp rsp;
4348 rsp.type = cpu_to_le16(type);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004349 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004350 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4351 &rsp);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004352 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004353
4354 return 0;
4355}
4356
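/* Handle an incoming Information Response: store the remote feature
 * mask (and ask for the fixed channel map if supported), then continue
 * bringing up the queued channels.
 */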
Gustavo Padovan2d792812012-10-06 10:07:01 +01004357static inline int l2cap_information_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004358 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4359 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004360{
4361 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4362 u16 type, result;
4363
Jaganath Kanakkassery3f6fa3d2013-06-21 19:55:11 +05304364 if (cmd_len < sizeof(*rsp))
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004365 return -EPROTO;
4366
Linus Torvalds1da177e2005-04-16 15:20:36 -07004367 type = __le16_to_cpu(rsp->type);
4368 result = __le16_to_cpu(rsp->result);
4369
4370 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4371
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004372 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
4373 if (cmd->ident != conn->info_ident ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01004374 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004375 return 0;
4376
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02004377 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004378
Ville Tervoadb08ed2010-08-04 09:43:33 +03004379 if (result != L2CAP_IR_SUCCESS) {
4380 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4381 conn->info_ident = 0;
4382
4383 l2cap_conn_start(conn);
4384
4385 return 0;
4386 }
4387
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004388 switch (type) {
4389 case L2CAP_IT_FEAT_MASK:
Harvey Harrison83985312008-05-02 16:25:46 -07004390 conn->feat_mask = get_unaligned_le32(rsp->data);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004391
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07004392 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004393 struct l2cap_info_req req;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004394 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004395
4396 conn->info_ident = l2cap_get_ident(conn);
4397
4398 l2cap_send_cmd(conn, conn->info_ident,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004399 L2CAP_INFO_REQ, sizeof(req), &req);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004400 } else {
4401 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4402 conn->info_ident = 0;
4403
4404 l2cap_conn_start(conn);
4405 }
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004406 break;
4407
4408 case L2CAP_IT_FIXED_CHAN:
4409 conn->fixed_chan_mask = rsp->data[0];
Marcel Holtmann984947d2009-02-06 23:35:19 +01004410 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004411 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004412
4413 l2cap_conn_start(conn);
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004414 break;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004415 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004416
Linus Torvalds1da177e2005-04-16 15:20:36 -07004417 return 0;
4418}
4419
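/* Handle an incoming Create Channel Request (AMP): for controller id 0
 * fall back to a normal BR/EDR connect, otherwise validate the AMP
 * controller, create the channel and bind it to the high speed link.
 */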
Mat Martineau17009152012-10-23 15:24:07 -07004420static int l2cap_create_channel_req(struct l2cap_conn *conn,
4421 struct l2cap_cmd_hdr *cmd,
4422 u16 cmd_len, void *data)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004423{
4424 struct l2cap_create_chan_req *req = data;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004425 struct l2cap_create_chan_rsp rsp;
Mat Martineau17009152012-10-23 15:24:07 -07004426 struct l2cap_chan *chan;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004427 struct hci_dev *hdev;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004428 u16 psm, scid;
4429
4430 if (cmd_len != sizeof(*req))
4431 return -EPROTO;
4432
Marcel Holtmann848566b2013-10-01 22:59:22 -07004433 if (!conn->hs_enabled)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004434 return -EINVAL;
4435
4436 psm = le16_to_cpu(req->psm);
4437 scid = le16_to_cpu(req->scid);
4438
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004439 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004440
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004441	/* For controller id 0, make a BR/EDR connection */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004442 if (req->amp_id == AMP_ID_BREDR) {
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004443 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4444 req->amp_id);
4445 return 0;
4446 }
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004447
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004448 /* Validate AMP controller id */
4449 hdev = hci_dev_get(req->amp_id);
4450 if (!hdev)
4451 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004452
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004453 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
Mat Martineau17009152012-10-23 15:24:07 -07004454 hci_dev_put(hdev);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004455 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004456 }
4457
4458 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4459 req->amp_id);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004460 if (chan) {
4461 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4462 struct hci_conn *hs_hcon;
4463
Marcel Holtmann98e0f7e2013-10-13 02:23:40 -07004464 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4465 &conn->hcon->dst);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004466 if (!hs_hcon) {
4467 hci_dev_put(hdev);
Johan Hedberg662d6522013-10-16 11:20:47 +03004468 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4469 chan->dcid);
4470 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004471 }
4472
4473 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4474
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004475 mgr->bredr_chan = chan;
4476 chan->hs_hcon = hs_hcon;
Andrei Emeltchenkofd45bf42012-11-20 17:16:22 +02004477 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004478 conn->mtu = hdev->block_mtu;
4479 }
4480
4481 hci_dev_put(hdev);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004482
4483 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004484
4485error:
4486 rsp.dcid = 0;
4487 rsp.scid = cpu_to_le16(scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004488 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4489 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004490
4491 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4492 sizeof(rsp), &rsp);
4493
Johan Hedbergdc280802013-09-16 13:05:13 +03004494 return 0;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004495}
4496
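/* Send a Move Channel Request for this channel to the given destination
 * controller and start the move timer.
 */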
Mat Martineau8eb200b2012-10-23 15:24:17 -07004497static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4498{
4499 struct l2cap_move_chan_req req;
4500 u8 ident;
4501
4502 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4503
4504 ident = l2cap_get_ident(chan->conn);
4505 chan->ident = ident;
4506
4507 req.icid = cpu_to_le16(chan->scid);
4508 req.dest_amp_id = dest_amp_id;
4509
4510 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4511 &req);
4512
4513 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4514}
4515
Mat Martineau1500109b2012-10-23 15:24:15 -07004516static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004517{
4518 struct l2cap_move_chan_rsp rsp;
4519
Mat Martineau1500109b2012-10-23 15:24:15 -07004520 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004521
Mat Martineau1500109b2012-10-23 15:24:15 -07004522 rsp.icid = cpu_to_le16(chan->dcid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004523 rsp.result = cpu_to_le16(result);
4524
Mat Martineau1500109b2012-10-23 15:24:15 -07004525 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4526 sizeof(rsp), &rsp);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004527}
4528
Mat Martineau5b155ef2012-10-23 15:24:14 -07004529static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004530{
4531 struct l2cap_move_chan_cfm cfm;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004532
Mat Martineau5b155ef2012-10-23 15:24:14 -07004533 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004534
Mat Martineau5b155ef2012-10-23 15:24:14 -07004535 chan->ident = l2cap_get_ident(chan->conn);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004536
Mat Martineau5b155ef2012-10-23 15:24:14 -07004537 cfm.icid = cpu_to_le16(chan->scid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004538 cfm.result = cpu_to_le16(result);
4539
Mat Martineau5b155ef2012-10-23 15:24:14 -07004540 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4541 sizeof(cfm), &cfm);
4542
4543 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4544}
4545
4546static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4547{
4548 struct l2cap_move_chan_cfm cfm;
4549
4550 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4551
4552 cfm.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004553 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
Mat Martineau5b155ef2012-10-23 15:24:14 -07004554
4555 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4556 sizeof(cfm), &cfm);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004557}
4558
4559static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004560 u16 icid)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004561{
4562 struct l2cap_move_chan_cfm_rsp rsp;
4563
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004564 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004565
4566 rsp.icid = cpu_to_le16(icid);
4567 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4568}
4569
Mat Martineau5f3847a2012-10-23 15:24:12 -07004570static void __release_logical_link(struct l2cap_chan *chan)
4571{
4572 chan->hs_hchan = NULL;
4573 chan->hs_hcon = NULL;
4574
4575 /* Placeholder - release the logical link */
4576}
4577
Mat Martineau1500109b2012-10-23 15:24:15 -07004578static void l2cap_logical_fail(struct l2cap_chan *chan)
4579{
4580 /* Logical link setup failed */
4581 if (chan->state != BT_CONNECTED) {
4582 /* Create channel failure, disconnect */
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004583 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineau1500109b2012-10-23 15:24:15 -07004584 return;
4585 }
4586
4587 switch (chan->move_role) {
4588 case L2CAP_MOVE_ROLE_RESPONDER:
4589 l2cap_move_done(chan);
4590 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4591 break;
4592 case L2CAP_MOVE_ROLE_INITIATOR:
4593 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4594 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4595 /* Remote has only sent pending or
4596 * success responses, clean up
4597 */
4598 l2cap_move_done(chan);
4599 }
4600
4601		/* Other AMP move states imply that the move
4602		 * has already been aborted
4603		 */
4604 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4605 break;
4606 }
4607}
4608
4609static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4610 struct hci_chan *hchan)
4611{
4612 struct l2cap_conf_rsp rsp;
Mat Martineau1500109b2012-10-23 15:24:15 -07004613
Andrei Emeltchenko336178a2012-10-31 15:46:27 +02004614 chan->hs_hchan = hchan;
Mat Martineau1500109b2012-10-23 15:24:15 -07004615 chan->hs_hcon->l2cap_data = chan->conn;
4616
Andrei Emeltchenko35ba9562012-10-25 15:20:43 +03004617 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
Mat Martineau1500109b2012-10-23 15:24:15 -07004618
4619 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Andrei Emeltchenkofe79c6f2012-10-25 15:20:42 +03004620 int err;
Mat Martineau1500109b2012-10-23 15:24:15 -07004621
4622 set_default_fcs(chan);
4623
4624 err = l2cap_ertm_init(chan);
4625 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004626 l2cap_send_disconn_req(chan, -err);
Mat Martineau1500109b2012-10-23 15:24:15 -07004627 else
4628 l2cap_chan_ready(chan);
4629 }
4630}
4631
4632static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4633 struct hci_chan *hchan)
4634{
4635 chan->hs_hcon = hchan->conn;
4636 chan->hs_hcon->l2cap_data = chan->conn;
4637
4638 BT_DBG("move_state %d", chan->move_state);
4639
4640 switch (chan->move_state) {
4641 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4642 /* Move confirm will be sent after a success
4643 * response is received
4644 */
4645 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4646 break;
4647 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4648 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4649 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4650 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4651 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4652 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4653 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4654 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4655 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4656 }
4657 break;
4658 default:
4659 /* Move was not in expected state, free the channel */
4660 __release_logical_link(chan);
4661
4662 chan->move_state = L2CAP_MOVE_STABLE;
4663 }
4664}
4665
4666/* Call with chan locked */
Andrei Emeltchenko27695fb2012-10-25 15:20:45 +03004667void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4668 u8 status)
Mat Martineau5b155ef2012-10-23 15:24:14 -07004669{
Mat Martineau1500109b2012-10-23 15:24:15 -07004670 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4671
4672 if (status) {
4673 l2cap_logical_fail(chan);
4674 __release_logical_link(chan);
4675 return;
4676 }
4677
4678 if (chan->state != BT_CONNECTED) {
4679 /* Ignore logical link if channel is on BR/EDR */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004680 if (chan->local_amp_id != AMP_ID_BREDR)
Mat Martineau1500109b2012-10-23 15:24:15 -07004681 l2cap_logical_finish_create(chan, hchan);
4682 } else {
4683 l2cap_logical_finish_move(chan, hchan);
4684 }
Mat Martineau5b155ef2012-10-23 15:24:14 -07004685}
4686
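/* Start moving a channel: a channel currently on BR/EDR is prepared for
 * a move to an AMP controller (only when its policy prefers AMP), while
 * a channel already on an AMP controller is moved back to BR/EDR.
 */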
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004687void l2cap_move_start(struct l2cap_chan *chan)
4688{
4689 BT_DBG("chan %p", chan);
4690
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004691 if (chan->local_amp_id == AMP_ID_BREDR) {
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004692 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4693 return;
4694 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4695 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4696 /* Placeholder - start physical link setup */
4697 } else {
4698 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4699 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4700 chan->move_id = 0;
4701 l2cap_move_setup(chan);
4702 l2cap_send_move_chan_req(chan, 0);
4703 }
4704}
4705
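/* Physical link setup finished for a channel that is still being
 * created: for an outgoing channel either send the Create Channel
 * Request on the AMP or fall back to a plain BR/EDR connect; for an
 * incoming channel send the Create Channel Response and, on success,
 * start configuration.
 */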
Mat Martineau8eb200b2012-10-23 15:24:17 -07004706static void l2cap_do_create(struct l2cap_chan *chan, int result,
4707 u8 local_amp_id, u8 remote_amp_id)
4708{
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004709 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4710 local_amp_id, remote_amp_id);
4711
Andrei Emeltchenko12d6cc62012-11-14 17:39:31 +02004712 chan->fcs = L2CAP_FCS_NONE;
4713
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004714 /* Outgoing channel on AMP */
4715 if (chan->state == BT_CONNECT) {
4716 if (result == L2CAP_CR_SUCCESS) {
4717 chan->local_amp_id = local_amp_id;
4718 l2cap_send_create_chan_req(chan, remote_amp_id);
4719 } else {
4720 /* Revert to BR/EDR connect */
4721 l2cap_send_conn_req(chan);
4722 }
4723
4724 return;
4725 }
4726
4727 /* Incoming channel on AMP */
4728 if (__l2cap_no_conn_pending(chan)) {
Mat Martineau8eb200b2012-10-23 15:24:17 -07004729 struct l2cap_conn_rsp rsp;
4730 char buf[128];
4731 rsp.scid = cpu_to_le16(chan->dcid);
4732 rsp.dcid = cpu_to_le16(chan->scid);
4733
Mat Martineau8eb200b2012-10-23 15:24:17 -07004734 if (result == L2CAP_CR_SUCCESS) {
4735 /* Send successful response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004736 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4737 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004738 } else {
4739 /* Send negative response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004740 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4741 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004742 }
4743
4744 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4745 sizeof(rsp), &rsp);
4746
4747 if (result == L2CAP_CR_SUCCESS) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02004748 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004749 set_bit(CONF_REQ_SENT, &chan->conf_state);
4750 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4751 L2CAP_CONF_REQ,
4752 l2cap_build_conf_req(chan, buf), buf);
4753 chan->num_conf_req++;
4754 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004755 }
4756}
4757
4758static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4759 u8 remote_amp_id)
4760{
4761 l2cap_move_setup(chan);
4762 chan->move_id = local_amp_id;
4763 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4764
4765 l2cap_send_move_chan_req(chan, remote_amp_id);
4766}
4767
4768static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4769{
4770 struct hci_chan *hchan = NULL;
4771
4772 /* Placeholder - get hci_chan for logical link */
4773
4774 if (hchan) {
4775 if (hchan->state == BT_CONNECTED) {
4776 /* Logical link is ready to go */
4777 chan->hs_hcon = hchan->conn;
4778 chan->hs_hcon->l2cap_data = chan->conn;
4779 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4780 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4781
4782 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4783 } else {
4784 /* Wait for logical link to be ready */
4785 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4786 }
4787 } else {
4788 /* Logical link not available */
4789 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4790 }
4791}
4792
4793static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4794{
4795 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4796 u8 rsp_result;
4797 if (result == -EINVAL)
4798 rsp_result = L2CAP_MR_BAD_ID;
4799 else
4800 rsp_result = L2CAP_MR_NOT_ALLOWED;
4801
4802 l2cap_send_move_chan_rsp(chan, rsp_result);
4803 }
4804
4805 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4806 chan->move_state = L2CAP_MOVE_STABLE;
4807
4808 /* Restart data transmission */
4809 l2cap_ertm_send(chan);
4810}
4811
Andrei Emeltchenkoa514b172012-11-14 17:39:30 +02004812/* Invoke with locked chan */
4813void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
Mat Martineau8eb200b2012-10-23 15:24:17 -07004814{
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004815 u8 local_amp_id = chan->local_amp_id;
Andrei Emeltchenkofffadc02012-11-01 15:37:03 +02004816 u8 remote_amp_id = chan->remote_amp_id;
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004817
Mat Martineau8eb200b2012-10-23 15:24:17 -07004818 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4819 chan, result, local_amp_id, remote_amp_id);
4820
Mat Martineau8eb200b2012-10-23 15:24:17 -07004821 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4822 l2cap_chan_unlock(chan);
4823 return;
4824 }
4825
4826 if (chan->state != BT_CONNECTED) {
4827 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4828 } else if (result != L2CAP_MR_SUCCESS) {
4829 l2cap_do_move_cancel(chan, result);
4830 } else {
4831 switch (chan->move_role) {
4832 case L2CAP_MOVE_ROLE_INITIATOR:
4833 l2cap_do_move_initiate(chan, local_amp_id,
4834 remote_amp_id);
4835 break;
4836 case L2CAP_MOVE_ROLE_RESPONDER:
4837 l2cap_do_move_respond(chan, result);
4838 break;
4839 default:
4840 l2cap_do_move_cancel(chan, result);
4841 break;
4842 }
4843 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004844}
4845
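/* Handle an incoming Move Channel Request: validate the target channel,
 * mode and destination controller, detect move collisions, then either
 * accept the move (possibly pending on an AMP physical link) or reject
 * it with an appropriate result.
 */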
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004846static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004847 struct l2cap_cmd_hdr *cmd,
4848 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004849{
4850 struct l2cap_move_chan_req *req = data;
Mat Martineau1500109b2012-10-23 15:24:15 -07004851 struct l2cap_move_chan_rsp rsp;
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004852 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004853 u16 icid = 0;
4854 u16 result = L2CAP_MR_NOT_ALLOWED;
4855
4856 if (cmd_len != sizeof(*req))
4857 return -EPROTO;
4858
4859 icid = le16_to_cpu(req->icid);
4860
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004861 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004862
Marcel Holtmann848566b2013-10-01 22:59:22 -07004863 if (!conn->hs_enabled)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004864 return -EINVAL;
4865
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004866 chan = l2cap_get_chan_by_dcid(conn, icid);
4867 if (!chan) {
Mat Martineau1500109b2012-10-23 15:24:15 -07004868 rsp.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004869 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
Mat Martineau1500109b2012-10-23 15:24:15 -07004870 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4871 sizeof(rsp), &rsp);
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004872 return 0;
4873 }
4874
Mat Martineau1500109b2012-10-23 15:24:15 -07004875 chan->ident = cmd->ident;
4876
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004877 if (chan->scid < L2CAP_CID_DYN_START ||
4878 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4879 (chan->mode != L2CAP_MODE_ERTM &&
4880 chan->mode != L2CAP_MODE_STREAMING)) {
4881 result = L2CAP_MR_NOT_ALLOWED;
4882 goto send_move_response;
4883 }
4884
4885 if (chan->local_amp_id == req->dest_amp_id) {
4886 result = L2CAP_MR_SAME_ID;
4887 goto send_move_response;
4888 }
4889
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004890 if (req->dest_amp_id != AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004891 struct hci_dev *hdev;
4892 hdev = hci_dev_get(req->dest_amp_id);
4893 if (!hdev || hdev->dev_type != HCI_AMP ||
4894 !test_bit(HCI_UP, &hdev->flags)) {
4895 if (hdev)
4896 hci_dev_put(hdev);
4897
4898 result = L2CAP_MR_BAD_ID;
4899 goto send_move_response;
4900 }
4901 hci_dev_put(hdev);
4902 }
4903
4904 /* Detect a move collision. Only send a collision response
4905 * if this side has "lost", otherwise proceed with the move.
4906 * The winner has the larger bd_addr.
4907 */
4908 if ((__chan_is_moving(chan) ||
4909 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
Marcel Holtmann6f59b902013-10-13 05:24:01 -07004910 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004911 result = L2CAP_MR_COLLISION;
4912 goto send_move_response;
4913 }
4914
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004915 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4916 l2cap_move_setup(chan);
4917 chan->move_id = req->dest_amp_id;
4918 icid = chan->dcid;
4919
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004920 if (req->dest_amp_id == AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004921 /* Moving to BR/EDR */
4922 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4923 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4924 result = L2CAP_MR_PEND;
4925 } else {
4926 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4927 result = L2CAP_MR_SUCCESS;
4928 }
4929 } else {
4930 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4931 /* Placeholder - uncomment when amp functions are available */
4932 /*amp_accept_physical(chan, req->dest_amp_id);*/
4933 result = L2CAP_MR_PEND;
4934 }
4935
4936send_move_response:
Mat Martineau1500109b2012-10-23 15:24:15 -07004937 l2cap_send_move_chan_rsp(chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004938
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004939 l2cap_chan_unlock(chan);
4940
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004941 return 0;
4942}
4943
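/* Continue a channel move after a successful or pending Move Channel
 * Response. Depending on the current move state this either waits for
 * the logical link to complete or sends the Move Channel Confirm right
 * away.
 */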
Mat Martineau5b155ef2012-10-23 15:24:14 -07004944static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4945{
4946 struct l2cap_chan *chan;
4947 struct hci_chan *hchan = NULL;
4948
4949 chan = l2cap_get_chan_by_scid(conn, icid);
4950 if (!chan) {
4951 l2cap_send_move_chan_cfm_icid(conn, icid);
4952 return;
4953 }
4954
4955 __clear_chan_timer(chan);
4956 if (result == L2CAP_MR_PEND)
4957 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4958
4959 switch (chan->move_state) {
4960 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4961 /* Move confirm will be sent when logical link
4962 * is complete.
4963 */
4964 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4965 break;
4966 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4967 if (result == L2CAP_MR_PEND) {
4968 break;
4969 } else if (test_bit(CONN_LOCAL_BUSY,
4970 &chan->conn_state)) {
4971 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4972 } else {
4973 /* Logical link is up or moving to BR/EDR,
4974 * proceed with move
4975 */
4976 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4977 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4978 }
4979 break;
4980 case L2CAP_MOVE_WAIT_RSP:
4981 /* Moving to AMP */
4982 if (result == L2CAP_MR_SUCCESS) {
4983 /* Remote is ready, send confirm immediately
4984 * after logical link is ready
4985 */
4986 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4987 } else {
4988 /* Both logical link and move success
4989 * are required to confirm
4990 */
4991 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4992 }
4993
4994 /* Placeholder - get hci_chan for logical link */
4995 if (!hchan) {
4996 /* Logical link not available */
4997 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4998 break;
4999 }
5000
5001 /* If the logical link is not yet connected, do not
5002 * send confirmation.
5003 */
5004 if (hchan->state != BT_CONNECTED)
5005 break;
5006
5007 /* Logical link is already ready to go */
5008
5009 chan->hs_hcon = hchan->conn;
5010 chan->hs_hcon->l2cap_data = chan->conn;
5011
5012 if (result == L2CAP_MR_SUCCESS) {
5013 /* Can confirm now */
5014 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5015 } else {
5016 /* Now only need move success
5017 * to confirm
5018 */
5019 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5020 }
5021
5022 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5023 break;
5024 default:
5025 /* Any other amp move state means the move failed. */
5026 chan->move_id = chan->local_amp_id;
5027 l2cap_move_done(chan);
5028 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5029 }
5030
5031 l2cap_chan_unlock(chan);
5032}
5033
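/* Handle a failed Move Channel Response. A collision result demotes
 * this side from initiator to responder; any other failure cancels
 * the move. An unconfirmed Move Channel Confirm is sent either way.
 */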
5034static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5035 u16 result)
5036{
5037 struct l2cap_chan *chan;
5038
5039 chan = l2cap_get_chan_by_ident(conn, ident);
5040 if (!chan) {
5041 /* Could not locate channel, icid is best guess */
5042 l2cap_send_move_chan_cfm_icid(conn, icid);
5043 return;
5044 }
5045
5046 __clear_chan_timer(chan);
5047
5048 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5049 if (result == L2CAP_MR_COLLISION) {
5050 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5051 } else {
5052 /* Cleanup - cancel move */
5053 chan->move_id = chan->local_amp_id;
5054 l2cap_move_done(chan);
5055 }
5056 }
5057
5058 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5059
5060 l2cap_chan_unlock(chan);
5061}
5062
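/* Handle a Move Channel Response: success and pending results let the
 * move continue, any other result aborts it.
 */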
5063static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5064 struct l2cap_cmd_hdr *cmd,
5065 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005066{
5067 struct l2cap_move_chan_rsp *rsp = data;
5068 u16 icid, result;
5069
5070 if (cmd_len != sizeof(*rsp))
5071 return -EPROTO;
5072
5073 icid = le16_to_cpu(rsp->icid);
5074 result = le16_to_cpu(rsp->result);
5075
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005076 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005077
Mat Martineau5b155ef2012-10-23 15:24:14 -07005078 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5079 l2cap_move_continue(conn, icid, result);
5080 else
5081 l2cap_move_fail(conn, cmd->ident, icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005082
5083 return 0;
5084}
5085
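/* Handle a Move Channel Confirm. A confirmed result commits the move
 * to the new controller (releasing the logical link when moving back
 * to BR/EDR), an unconfirmed result reverts it. A confirm response is
 * sent even when the ICID is unknown, as the spec requires.
 */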
Mat Martineau5f3847a2012-10-23 15:24:12 -07005086static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5087 struct l2cap_cmd_hdr *cmd,
5088 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005089{
5090 struct l2cap_move_chan_cfm *cfm = data;
Mat Martineau5f3847a2012-10-23 15:24:12 -07005091 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005092 u16 icid, result;
5093
5094 if (cmd_len != sizeof(*cfm))
5095 return -EPROTO;
5096
5097 icid = le16_to_cpu(cfm->icid);
5098 result = le16_to_cpu(cfm->result);
5099
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005100 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005101
Mat Martineau5f3847a2012-10-23 15:24:12 -07005102 chan = l2cap_get_chan_by_dcid(conn, icid);
5103 if (!chan) {
5104 /* Spec requires a response even if the icid was not found */
5105 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5106 return 0;
5107 }
5108
5109 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5110 if (result == L2CAP_MC_CONFIRMED) {
5111 chan->local_amp_id = chan->move_id;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005112 if (chan->local_amp_id == AMP_ID_BREDR)
Mat Martineau5f3847a2012-10-23 15:24:12 -07005113 __release_logical_link(chan);
5114 } else {
5115 chan->move_id = chan->local_amp_id;
5116 }
5117
5118 l2cap_move_done(chan);
5119 }
5120
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005121 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5122
Mat Martineau5f3847a2012-10-23 15:24:12 -07005123 l2cap_chan_unlock(chan);
5124
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005125 return 0;
5126}
5127
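/* Handle a Move Channel Confirm Response: commit the move on this
 * side and release the logical link if the channel ended up back on
 * BR/EDR.
 */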
5128static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005129 struct l2cap_cmd_hdr *cmd,
5130 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005131{
5132 struct l2cap_move_chan_cfm_rsp *rsp = data;
Mat Martineau3fd71a02012-10-23 15:24:16 -07005133 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005134 u16 icid;
5135
5136 if (cmd_len != sizeof(*rsp))
5137 return -EPROTO;
5138
5139 icid = le16_to_cpu(rsp->icid);
5140
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005141 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005142
Mat Martineau3fd71a02012-10-23 15:24:16 -07005143 chan = l2cap_get_chan_by_scid(conn, icid);
5144 if (!chan)
5145 return 0;
5146
5147 __clear_chan_timer(chan);
5148
5149 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5150 chan->local_amp_id = chan->move_id;
5151
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005152 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
Mat Martineau3fd71a02012-10-23 15:24:16 -07005153 __release_logical_link(chan);
5154
5155 l2cap_move_done(chan);
5156 }
5157
5158 l2cap_chan_unlock(chan);
5159
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005160 return 0;
5161}
5162
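/* Handle an LE Connection Parameter Update Request. It is only
 * accepted when we are master of the connection; the parameters are
 * validated, a response is sent, and accepted values are passed to
 * the controller and reported via mgmt.
 */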
Claudio Takahaside731152011-02-11 19:28:55 -02005163static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005164 struct l2cap_cmd_hdr *cmd,
Johan Hedberg203e6392013-05-15 10:07:15 +03005165 u16 cmd_len, u8 *data)
Claudio Takahaside731152011-02-11 19:28:55 -02005166{
5167 struct hci_conn *hcon = conn->hcon;
5168 struct l2cap_conn_param_update_req *req;
5169 struct l2cap_conn_param_update_rsp rsp;
Johan Hedberg203e6392013-05-15 10:07:15 +03005170 u16 min, max, latency, to_multiplier;
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005171 int err;
Claudio Takahaside731152011-02-11 19:28:55 -02005172
Johan Hedberg40bef302014-07-16 11:42:27 +03005173 if (hcon->role != HCI_ROLE_MASTER)
Claudio Takahaside731152011-02-11 19:28:55 -02005174 return -EINVAL;
5175
Claudio Takahaside731152011-02-11 19:28:55 -02005176 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5177 return -EPROTO;
5178
5179 req = (struct l2cap_conn_param_update_req *) data;
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005180 min = __le16_to_cpu(req->min);
5181 max = __le16_to_cpu(req->max);
Claudio Takahaside731152011-02-11 19:28:55 -02005182 latency = __le16_to_cpu(req->latency);
5183 to_multiplier = __le16_to_cpu(req->to_multiplier);
5184
5185 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01005186 min, max, latency, to_multiplier);
Claudio Takahaside731152011-02-11 19:28:55 -02005187
5188 memset(&rsp, 0, sizeof(rsp));
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005189
Andre Guedesd4905f22014-06-25 21:52:52 -03005190 err = hci_check_conn_params(min, max, latency, to_multiplier);
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005191 if (err)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005192 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005193 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005194 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005195
5196 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005197 sizeof(rsp), &rsp);
Claudio Takahaside731152011-02-11 19:28:55 -02005198
Andre Guedesffb5a8272014-07-01 18:10:11 -03005199 if (!err) {
Johan Hedbergf4869e22014-07-02 17:37:32 +03005200 u8 store_hint;
Andre Guedesffb5a8272014-07-01 18:10:11 -03005201
Johan Hedbergf4869e22014-07-02 17:37:32 +03005202 store_hint = hci_le_conn_update(hcon, min, max, latency,
5203 to_multiplier);
5204 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5205 store_hint, min, max, latency,
5206 to_multiplier);
5207
Andre Guedesffb5a8272014-07-01 18:10:11 -03005208 }
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005209
Claudio Takahaside731152011-02-11 19:28:55 -02005210 return 0;
5211}
5212
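/* Handle an LE Credit Based Connection Response. On success the
 * channel takes over the remote MTU, MPS and initial credits and is
 * marked ready; any other result tears the channel down.
 */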
Johan Hedbergf1496de2013-05-13 14:15:56 +03005213static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5214 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5215 u8 *data)
5216{
5217 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5218 u16 dcid, mtu, mps, credits, result;
5219 struct l2cap_chan *chan;
5220 int err;
5221
5222 if (cmd_len < sizeof(*rsp))
5223 return -EPROTO;
5224
5225 dcid = __le16_to_cpu(rsp->dcid);
5226 mtu = __le16_to_cpu(rsp->mtu);
5227 mps = __le16_to_cpu(rsp->mps);
5228 credits = __le16_to_cpu(rsp->credits);
5229 result = __le16_to_cpu(rsp->result);
5230
5231 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5232 return -EPROTO;
5233
5234 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5235 dcid, mtu, mps, credits, result);
5236
5237 mutex_lock(&conn->chan_lock);
5238
5239 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5240 if (!chan) {
5241 err = -EBADSLT;
5242 goto unlock;
5243 }
5244
5245 err = 0;
5246
5247 l2cap_chan_lock(chan);
5248
5249 switch (result) {
5250 case L2CAP_CR_SUCCESS:
5251 chan->ident = 0;
5252 chan->dcid = dcid;
5253 chan->omtu = mtu;
5254 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005255 chan->tx_credits = credits;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005256 l2cap_chan_ready(chan);
5257 break;
5258
5259 default:
5260 l2cap_chan_del(chan, ECONNREFUSED);
5261 break;
5262 }
5263
5264 l2cap_chan_unlock(chan);
5265
5266unlock:
5267 mutex_unlock(&conn->chan_lock);
5268
5269 return err;
5270}
5271
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005272static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005273 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5274 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005275{
5276 int err = 0;
5277
5278 switch (cmd->code) {
5279 case L2CAP_COMMAND_REJ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005280 l2cap_command_rej(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005281 break;
5282
5283 case L2CAP_CONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005284 err = l2cap_connect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005285 break;
5286
5287 case L2CAP_CONN_RSP:
Mat Martineauf5a25982012-10-11 17:48:21 +03005288 case L2CAP_CREATE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005289 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005290 break;
5291
5292 case L2CAP_CONF_REQ:
5293 err = l2cap_config_req(conn, cmd, cmd_len, data);
5294 break;
5295
5296 case L2CAP_CONF_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005297 l2cap_config_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005298 break;
5299
5300 case L2CAP_DISCONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005301 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005302 break;
5303
5304 case L2CAP_DISCONN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005305 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005306 break;
5307
5308 case L2CAP_ECHO_REQ:
5309 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5310 break;
5311
5312 case L2CAP_ECHO_RSP:
5313 break;
5314
5315 case L2CAP_INFO_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005316 err = l2cap_information_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005317 break;
5318
5319 case L2CAP_INFO_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005320 l2cap_information_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005321 break;
5322
Mat Martineauf94ff6f2011-11-02 16:18:32 -07005323 case L2CAP_CREATE_CHAN_REQ:
5324 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5325 break;
5326
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005327 case L2CAP_MOVE_CHAN_REQ:
5328 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5329 break;
5330
5331 case L2CAP_MOVE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005332 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005333 break;
5334
5335 case L2CAP_MOVE_CHAN_CFM:
5336 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5337 break;
5338
5339 case L2CAP_MOVE_CHAN_CFM_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005340 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005341 break;
5342
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005343 default:
5344 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5345 err = -EINVAL;
5346 break;
5347 }
5348
5349 return err;
5350}
5351
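/* Handle an LE Credit Based Connection Request: find a listening
 * channel for the PSM, check security and duplicate source CIDs, then
 * create and set up the new channel before sending the response (or
 * deferring it when deferred setup is enabled).
 */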
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005352static int l2cap_le_connect_req(struct l2cap_conn *conn,
5353 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5354 u8 *data)
5355{
5356 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5357 struct l2cap_le_conn_rsp rsp;
5358 struct l2cap_chan *chan, *pchan;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005359 u16 dcid, scid, credits, mtu, mps;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005360 __le16 psm;
5361 u8 result;
5362
5363 if (cmd_len != sizeof(*req))
5364 return -EPROTO;
5365
5366 scid = __le16_to_cpu(req->scid);
5367 mtu = __le16_to_cpu(req->mtu);
5368 mps = __le16_to_cpu(req->mps);
5369 psm = req->psm;
5370 dcid = 0;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005371 credits = 0;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005372
5373 if (mtu < 23 || mps < 23)
5374 return -EPROTO;
5375
5376 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5377 scid, mtu, mps);
5378
5379	/* Check if we have a socket listening on this PSM */
5380 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5381 &conn->hcon->dst, LE_LINK);
5382 if (!pchan) {
5383 result = L2CAP_CR_BAD_PSM;
5384 chan = NULL;
5385 goto response;
5386 }
5387
5388 mutex_lock(&conn->chan_lock);
5389 l2cap_chan_lock(pchan);
5390
5391 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5392 result = L2CAP_CR_AUTHENTICATION;
5393 chan = NULL;
5394 goto response_unlock;
5395 }
5396
5397	/* Check if we already have a channel with that DCID */
5398 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5399 result = L2CAP_CR_NO_MEM;
5400 chan = NULL;
5401 goto response_unlock;
5402 }
5403
5404 chan = pchan->ops->new_connection(pchan);
5405 if (!chan) {
5406 result = L2CAP_CR_NO_MEM;
5407 goto response_unlock;
5408 }
5409
Johan Hedberg0ce43ce2013-12-05 14:55:33 +02005410 l2cap_le_flowctl_init(chan);
5411
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005412 bacpy(&chan->src, &conn->hcon->src);
5413 bacpy(&chan->dst, &conn->hcon->dst);
5414 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5415 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5416 chan->psm = psm;
5417 chan->dcid = scid;
5418 chan->omtu = mtu;
5419 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005420 chan->tx_credits = __le16_to_cpu(req->credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005421
5422 __l2cap_chan_add(conn, chan);
5423 dcid = chan->scid;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005424 credits = chan->rx_credits;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005425
5426 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5427
5428 chan->ident = cmd->ident;
5429
5430 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5431 l2cap_state_change(chan, BT_CONNECT2);
Johan Hedberg434714d2014-09-01 09:45:03 +03005432 /* The following result value is actually not defined
5433 * for LE CoC but we use it to let the function know
5434 * that it should bail out after doing its cleanup
5435 * instead of sending a response.
5436 */
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005437 result = L2CAP_CR_PEND;
5438 chan->ops->defer(chan);
5439 } else {
5440 l2cap_chan_ready(chan);
5441 result = L2CAP_CR_SUCCESS;
5442 }
5443
5444response_unlock:
5445 l2cap_chan_unlock(pchan);
5446 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03005447 l2cap_chan_put(pchan);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005448
5449 if (result == L2CAP_CR_PEND)
5450 return 0;
5451
5452response:
5453 if (chan) {
5454 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02005455 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005456 } else {
5457 rsp.mtu = 0;
5458 rsp.mps = 0;
5459 }
5460
5461 rsp.dcid = cpu_to_le16(dcid);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005462 rsp.credits = cpu_to_le16(credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005463 rsp.result = cpu_to_le16(result);
5464
5465 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5466
5467 return 0;
5468}
5469
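/* Handle an LE Flow Control Credit packet: add the granted credits,
 * guarding against credit overflow, send any queued frames and resume
 * the channel if credits remain.
 */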
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005470static inline int l2cap_le_credits(struct l2cap_conn *conn,
5471 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5472 u8 *data)
5473{
5474 struct l2cap_le_credits *pkt;
5475 struct l2cap_chan *chan;
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005476 u16 cid, credits, max_credits;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005477
5478 if (cmd_len != sizeof(*pkt))
5479 return -EPROTO;
5480
5481 pkt = (struct l2cap_le_credits *) data;
5482 cid = __le16_to_cpu(pkt->cid);
5483 credits = __le16_to_cpu(pkt->credits);
5484
5485 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5486
5487 chan = l2cap_get_chan_by_dcid(conn, cid);
5488 if (!chan)
5489 return -EBADSLT;
5490
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005491 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5492 if (credits > max_credits) {
5493 BT_ERR("LE credits overflow");
5494 l2cap_send_disconn_req(chan, ECONNRESET);
Martin Townsendee930532014-10-13 19:24:45 +01005495 l2cap_chan_unlock(chan);
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005496
5497 /* Return 0 so that we don't trigger an unnecessary
5498 * command reject packet.
5499 */
5500 return 0;
5501 }
5502
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005503 chan->tx_credits += credits;
5504
5505 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5506 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5507 chan->tx_credits--;
5508 }
5509
5510 if (chan->tx_credits)
5511 chan->ops->resume(chan);
5512
5513 l2cap_chan_unlock(chan);
5514
5515 return 0;
5516}
5517
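/* Handle a Command Reject on the LE signaling channel by tearing down
 * the channel that issued the rejected request.
 */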
Johan Hedberg71fb4192013-12-10 10:52:48 +02005518static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5519 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5520 u8 *data)
5521{
5522 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5523 struct l2cap_chan *chan;
5524
5525 if (cmd_len < sizeof(*rej))
5526 return -EPROTO;
5527
5528 mutex_lock(&conn->chan_lock);
5529
5530 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5531 if (!chan)
5532 goto done;
5533
5534 l2cap_chan_lock(chan);
5535 l2cap_chan_del(chan, ECONNREFUSED);
5536 l2cap_chan_unlock(chan);
5537
5538done:
5539 mutex_unlock(&conn->chan_lock);
5540 return 0;
5541}
5542
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005543static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
Johan Hedberg203e6392013-05-15 10:07:15 +03005544 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5545 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005546{
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005547 int err = 0;
5548
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005549 switch (cmd->code) {
5550 case L2CAP_COMMAND_REJ:
Johan Hedberg71fb4192013-12-10 10:52:48 +02005551 l2cap_le_command_rej(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005552 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005553
5554 case L2CAP_CONN_PARAM_UPDATE_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005555 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5556 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005557
5558 case L2CAP_CONN_PARAM_UPDATE_RSP:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005559 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005560
Johan Hedbergf1496de2013-05-13 14:15:56 +03005561 case L2CAP_LE_CONN_RSP:
5562 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005563 break;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005564
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005565 case L2CAP_LE_CONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005566 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5567 break;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005568
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005569 case L2CAP_LE_CREDITS:
5570 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5571 break;
5572
Johan Hedberg3defe012013-05-15 10:16:06 +03005573 case L2CAP_DISCONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005574 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5575 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005576
5577 case L2CAP_DISCONN_RSP:
5578 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005579 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005580
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005581 default:
5582 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005583 err = -EINVAL;
5584 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005585 }
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005586
5587 return err;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005588}
5589
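/* Process a PDU received on the LE signaling channel. LE signaling
 * carries a single command per PDU; a handler error is answered with
 * a Command Reject and the skb is always consumed.
 */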
Johan Hedbergc5623552013-04-29 19:35:33 +03005590static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5591 struct sk_buff *skb)
5592{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005593 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005594 struct l2cap_cmd_hdr *cmd;
5595 u16 len;
Johan Hedbergc5623552013-04-29 19:35:33 +03005596 int err;
5597
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005598 if (hcon->type != LE_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005599 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005600
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005601 if (skb->len < L2CAP_CMD_HDR_SIZE)
5602 goto drop;
Johan Hedbergc5623552013-04-29 19:35:33 +03005603
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005604 cmd = (void *) skb->data;
5605 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
Johan Hedbergc5623552013-04-29 19:35:33 +03005606
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005607 len = le16_to_cpu(cmd->len);
Johan Hedbergc5623552013-04-29 19:35:33 +03005608
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005609 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
Johan Hedbergc5623552013-04-29 19:35:33 +03005610
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005611 if (len != skb->len || !cmd->ident) {
5612 BT_DBG("corrupted command");
5613 goto drop;
5614 }
Johan Hedbergc5623552013-04-29 19:35:33 +03005615
Johan Hedberg203e6392013-05-15 10:07:15 +03005616 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005617 if (err) {
5618 struct l2cap_cmd_rej_unk rej;
Johan Hedbergc5623552013-04-29 19:35:33 +03005619
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005620		BT_ERR("LE signaling command processing failed (%d)", err);
Johan Hedbergc5623552013-04-29 19:35:33 +03005621
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005622 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005623 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5624 sizeof(rej), &rej);
Johan Hedbergc5623552013-04-29 19:35:33 +03005625 }
5626
Marcel Holtmann3b166292013-10-02 08:28:21 -07005627drop:
Johan Hedbergc5623552013-04-29 19:35:33 +03005628 kfree_skb(skb);
5629}
5630
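/* Process a PDU received on the BR/EDR signaling channel, which may
 * carry several commands; each failing command is answered with a
 * Command Reject and the skb is always consumed.
 */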
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005631static inline void l2cap_sig_channel(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005632 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005633{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005634 struct hci_conn *hcon = conn->hcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005635 u8 *data = skb->data;
5636 int len = skb->len;
5637 struct l2cap_cmd_hdr cmd;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005638 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005639
5640 l2cap_raw_recv(conn, skb);
5641
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005642 if (hcon->type != ACL_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005643 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005644
Linus Torvalds1da177e2005-04-16 15:20:36 -07005645 while (len >= L2CAP_CMD_HDR_SIZE) {
Al Viro88219a02007-07-29 00:17:25 -07005646 u16 cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5648 data += L2CAP_CMD_HDR_SIZE;
5649 len -= L2CAP_CMD_HDR_SIZE;
5650
Al Viro88219a02007-07-29 00:17:25 -07005651 cmd_len = le16_to_cpu(cmd.len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005652
Gustavo Padovan2d792812012-10-06 10:07:01 +01005653 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5654 cmd.ident);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005655
Al Viro88219a02007-07-29 00:17:25 -07005656 if (cmd_len > len || !cmd.ident) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005657 BT_DBG("corrupted command");
5658 break;
5659 }
5660
Johan Hedbergc5623552013-04-29 19:35:33 +03005661 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005662 if (err) {
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03005663 struct l2cap_cmd_rej_unk rej;
Gustavo F. Padovan2c6d1a22011-03-23 14:38:32 -03005664
5665			BT_ERR("BR/EDR signaling command processing failed (%d)", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005666
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005667 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Gustavo Padovan2d792812012-10-06 10:07:01 +01005668 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5669 sizeof(rej), &rej);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005670 }
5671
Al Viro88219a02007-07-29 00:17:25 -07005672 data += cmd_len;
5673 len -= cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005674 }
5675
Marcel Holtmann3b166292013-10-02 08:28:21 -07005676drop:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005677 kfree_skb(skb);
5678}
5679
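/* Verify the FCS of a received frame when CRC16 checking is enabled;
 * the FCS field is trimmed off the skb in the process.
 */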
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005680static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005681{
5682 u16 our_fcs, rcv_fcs;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03005683 int hdr_size;
5684
5685 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5686 hdr_size = L2CAP_EXT_HDR_SIZE;
5687 else
5688 hdr_size = L2CAP_ENH_HDR_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005689
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005690 if (chan->fcs == L2CAP_FCS_CRC16) {
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005691 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005692 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5693 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5694
5695 if (our_fcs != rcv_fcs)
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005696 return -EBADMSG;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005697 }
5698 return 0;
5699}
5700
Mat Martineau6ea00482012-05-17 20:53:52 -07005701static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005702{
Mat Martineaue31f7632012-05-17 20:53:41 -07005703 struct l2cap_ctrl control;
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005704
Mat Martineaue31f7632012-05-17 20:53:41 -07005705 BT_DBG("chan %p", chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005706
Mat Martineaue31f7632012-05-17 20:53:41 -07005707 memset(&control, 0, sizeof(control));
5708 control.sframe = 1;
5709 control.final = 1;
5710 control.reqseq = chan->buffer_seq;
5711 set_bit(CONN_SEND_FBIT, &chan->conn_state);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005712
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005713 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
Mat Martineaue31f7632012-05-17 20:53:41 -07005714 control.super = L2CAP_SUPER_RNR;
5715 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005716 }
5717
Mat Martineaue31f7632012-05-17 20:53:41 -07005718 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5719 chan->unacked_frames > 0)
5720 __set_retrans_timer(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005721
Mat Martineaue31f7632012-05-17 20:53:41 -07005722 /* Send pending iframes */
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03005723 l2cap_ertm_send(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005724
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005725 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
Mat Martineaue31f7632012-05-17 20:53:41 -07005726 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5727 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5728 * send it now.
5729 */
5730 control.super = L2CAP_SUPER_RR;
5731 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005732 }
5733}
5734
Gustavo Padovan2d792812012-10-06 10:07:01 +01005735static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5736 struct sk_buff **last_frag)
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005737{
Mat Martineau84084a32011-07-22 14:54:00 -07005738 /* skb->len reflects data in skb as well as all fragments
5739 * skb->data_len reflects only data in fragments
5740 */
5741 if (!skb_has_frag_list(skb))
5742 skb_shinfo(skb)->frag_list = new_frag;
5743
5744 new_frag->next = NULL;
5745
5746 (*last_frag)->next = new_frag;
5747 *last_frag = new_frag;
5748
5749 skb->len += new_frag->len;
5750 skb->data_len += new_frag->len;
5751 skb->truesize += new_frag->truesize;
5752}
5753
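/* Reassemble a received SDU from its I-frames according to the SAR
 * field, enforcing the signalled SDU length and the local MTU, and
 * hand complete SDUs to the channel's recv callback.
 */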
Mat Martineau4b51dae92012-05-17 20:53:37 -07005754static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5755 struct l2cap_ctrl *control)
Mat Martineau84084a32011-07-22 14:54:00 -07005756{
5757 int err = -EINVAL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005758
Mat Martineau4b51dae92012-05-17 20:53:37 -07005759 switch (control->sar) {
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005760 case L2CAP_SAR_UNSEGMENTED:
Mat Martineau84084a32011-07-22 14:54:00 -07005761 if (chan->sdu)
5762 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005763
Gustavo Padovan80b98022012-05-27 22:27:51 -03005764 err = chan->ops->recv(chan, skb);
Mat Martineau84084a32011-07-22 14:54:00 -07005765 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005766
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005767 case L2CAP_SAR_START:
Mat Martineau84084a32011-07-22 14:54:00 -07005768 if (chan->sdu)
5769 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005770
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005771 chan->sdu_len = get_unaligned_le16(skb->data);
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005772 skb_pull(skb, L2CAP_SDULEN_SIZE);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005773
Mat Martineau84084a32011-07-22 14:54:00 -07005774 if (chan->sdu_len > chan->imtu) {
5775 err = -EMSGSIZE;
5776 break;
5777 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005778
Mat Martineau84084a32011-07-22 14:54:00 -07005779 if (skb->len >= chan->sdu_len)
5780 break;
5781
5782 chan->sdu = skb;
5783 chan->sdu_last_frag = skb;
5784
5785 skb = NULL;
5786 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005787 break;
5788
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005789 case L2CAP_SAR_CONTINUE:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005790 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005791 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005792
Mat Martineau84084a32011-07-22 14:54:00 -07005793 append_skb_frag(chan->sdu, skb,
5794 &chan->sdu_last_frag);
5795 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005796
Mat Martineau84084a32011-07-22 14:54:00 -07005797 if (chan->sdu->len >= chan->sdu_len)
5798 break;
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005799
Mat Martineau84084a32011-07-22 14:54:00 -07005800 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005801 break;
5802
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005803 case L2CAP_SAR_END:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005804 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005805 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005806
Mat Martineau84084a32011-07-22 14:54:00 -07005807 append_skb_frag(chan->sdu, skb,
5808 &chan->sdu_last_frag);
5809 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005810
Mat Martineau84084a32011-07-22 14:54:00 -07005811 if (chan->sdu->len != chan->sdu_len)
5812 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005813
Gustavo Padovan80b98022012-05-27 22:27:51 -03005814 err = chan->ops->recv(chan, chan->sdu);
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005815
Mat Martineau84084a32011-07-22 14:54:00 -07005816 if (!err) {
5817 /* Reassembly complete */
5818 chan->sdu = NULL;
5819 chan->sdu_last_frag = NULL;
5820 chan->sdu_len = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005821 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005822 break;
5823 }
5824
Mat Martineau84084a32011-07-22 14:54:00 -07005825 if (err) {
5826 kfree_skb(skb);
5827 kfree_skb(chan->sdu);
5828 chan->sdu = NULL;
5829 chan->sdu_last_frag = NULL;
5830 chan->sdu_len = 0;
5831 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005832
Mat Martineau84084a32011-07-22 14:54:00 -07005833 return err;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005834}
5835
Mat Martineau32b32732012-10-23 15:24:11 -07005836static int l2cap_resegment(struct l2cap_chan *chan)
5837{
5838 /* Placeholder */
5839 return 0;
5840}
5841
Mat Martineaue3281402011-07-07 09:39:02 -07005842void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
Gustavo F. Padovan712132eb2010-06-21 19:39:50 -03005843{
Mat Martineau61aa4f52012-05-17 20:53:40 -07005844 u8 event;
5845
5846 if (chan->mode != L2CAP_MODE_ERTM)
5847 return;
5848
5849 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03005850 l2cap_tx(chan, NULL, NULL, event);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005851}
5852
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005853static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5854{
Mat Martineau63838722012-05-17 20:53:45 -07005855 int err = 0;
5856 /* Pass sequential frames to l2cap_reassemble_sdu()
5857 * until a gap is encountered.
5858 */
5859
5860 BT_DBG("chan %p", chan);
5861
5862 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5863 struct sk_buff *skb;
5864 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5865 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5866
5867 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5868
5869 if (!skb)
5870 break;
5871
5872 skb_unlink(skb, &chan->srej_q);
5873 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5874 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5875 if (err)
5876 break;
5877 }
5878
5879 if (skb_queue_empty(&chan->srej_q)) {
5880 chan->rx_state = L2CAP_RX_STATE_RECV;
5881 l2cap_send_ack(chan);
5882 }
5883
5884 return err;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005885}
5886
5887static void l2cap_handle_srej(struct l2cap_chan *chan,
5888 struct l2cap_ctrl *control)
5889{
Mat Martineauf80842a2012-05-17 20:53:46 -07005890 struct sk_buff *skb;
5891
5892 BT_DBG("chan %p, control %p", chan, control);
5893
5894 if (control->reqseq == chan->next_tx_seq) {
5895 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005896 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07005897 return;
5898 }
5899
5900 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5901
5902 if (skb == NULL) {
5903 BT_DBG("Seq %d not available for retransmission",
5904 control->reqseq);
5905 return;
5906 }
5907
5908 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5909 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005910 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07005911 return;
5912 }
5913
5914 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5915
5916 if (control->poll) {
5917 l2cap_pass_to_tx(chan, control);
5918
5919 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5920 l2cap_retransmit(chan, control);
5921 l2cap_ertm_send(chan);
5922
5923 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5924 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5925 chan->srej_save_reqseq = control->reqseq;
5926 }
5927 } else {
5928 l2cap_pass_to_tx_fbit(chan, control);
5929
5930 if (control->final) {
5931 if (chan->srej_save_reqseq != control->reqseq ||
5932 !test_and_clear_bit(CONN_SREJ_ACT,
5933 &chan->conn_state))
5934 l2cap_retransmit(chan, control);
5935 } else {
5936 l2cap_retransmit(chan, control);
5937 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5938 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5939 chan->srej_save_reqseq = control->reqseq;
5940 }
5941 }
5942 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005943}
5944
5945static void l2cap_handle_rej(struct l2cap_chan *chan,
5946 struct l2cap_ctrl *control)
5947{
Mat Martineaufcd289d2012-05-17 20:53:47 -07005948 struct sk_buff *skb;
5949
5950 BT_DBG("chan %p, control %p", chan, control);
5951
5952 if (control->reqseq == chan->next_tx_seq) {
5953 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005954 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07005955 return;
5956 }
5957
5958 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5959
5960 if (chan->max_tx && skb &&
5961 bt_cb(skb)->control.retries >= chan->max_tx) {
5962 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005963 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07005964 return;
5965 }
5966
5967 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5968
5969 l2cap_pass_to_tx(chan, control);
5970
5971 if (control->final) {
5972 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5973 l2cap_retransmit_all(chan, control);
5974 } else {
5975 l2cap_retransmit_all(chan, control);
5976 l2cap_ertm_send(chan);
5977 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5978 set_bit(CONN_REJ_ACT, &chan->conn_state);
5979 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005980}
5981
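/* Classify the TxSeq of a received I-frame relative to the expected
 * sequence number and the receive window so the caller can decide
 * whether to deliver, queue, SREJ or ignore the frame.
 */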
Mat Martineau4b51dae92012-05-17 20:53:37 -07005982static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5983{
5984 BT_DBG("chan %p, txseq %d", chan, txseq);
5985
5986 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5987 chan->expected_tx_seq);
5988
5989 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5990 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
Gustavo Padovan2d792812012-10-06 10:07:01 +01005991 chan->tx_win) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07005992 /* See notes below regarding "double poll" and
5993 * invalid packets.
5994 */
5995 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5996 BT_DBG("Invalid/Ignore - after SREJ");
5997 return L2CAP_TXSEQ_INVALID_IGNORE;
5998 } else {
5999 BT_DBG("Invalid - in window after SREJ sent");
6000 return L2CAP_TXSEQ_INVALID;
6001 }
6002 }
6003
6004 if (chan->srej_list.head == txseq) {
6005 BT_DBG("Expected SREJ");
6006 return L2CAP_TXSEQ_EXPECTED_SREJ;
6007 }
6008
6009 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6010 BT_DBG("Duplicate SREJ - txseq already stored");
6011 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6012 }
6013
6014 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6015 BT_DBG("Unexpected SREJ - not requested");
6016 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6017 }
6018 }
6019
6020 if (chan->expected_tx_seq == txseq) {
6021 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6022 chan->tx_win) {
6023 BT_DBG("Invalid - txseq outside tx window");
6024 return L2CAP_TXSEQ_INVALID;
6025 } else {
6026 BT_DBG("Expected");
6027 return L2CAP_TXSEQ_EXPECTED;
6028 }
6029 }
6030
6031 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
Gustavo Padovan2d792812012-10-06 10:07:01 +01006032 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07006033 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6034 return L2CAP_TXSEQ_DUPLICATE;
6035 }
6036
6037 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6038 /* A source of invalid packets is a "double poll" condition,
6039 * where delays cause us to send multiple poll packets. If
6040 * the remote stack receives and processes both polls,
6041 * sequence numbers can wrap around in such a way that a
6042 * resent frame has a sequence number that looks like new data
6043 * with a sequence gap. This would trigger an erroneous SREJ
6044 * request.
6045 *
6046 * Fortunately, this is impossible with a tx window that's
6047 * less than half of the maximum sequence number, which allows
6048 * invalid frames to be safely ignored.
6049 *
6050 * With tx window sizes greater than half of the tx window
6051 * maximum, the frame is invalid and cannot be ignored. This
6052 * causes a disconnect.
6053 */
6054
6055 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6056 BT_DBG("Invalid/Ignore - txseq outside tx window");
6057 return L2CAP_TXSEQ_INVALID_IGNORE;
6058 } else {
6059 BT_DBG("Invalid - txseq outside tx window");
6060 return L2CAP_TXSEQ_INVALID;
6061 }
6062 } else {
6063 BT_DBG("Unexpected - txseq indicates missing frames");
6064 return L2CAP_TXSEQ_UNEXPECTED;
6065 }
6066}
6067
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006068static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6069 struct l2cap_ctrl *control,
6070 struct sk_buff *skb, u8 event)
6071{
6072 int err = 0;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006073 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006074
6075 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6076 event);
6077
6078 switch (event) {
6079 case L2CAP_EV_RECV_IFRAME:
6080 switch (l2cap_classify_txseq(chan, control->txseq)) {
6081 case L2CAP_TXSEQ_EXPECTED:
6082 l2cap_pass_to_tx(chan, control);
6083
6084 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6085 BT_DBG("Busy, discarding expected seq %d",
6086 control->txseq);
6087 break;
6088 }
6089
6090 chan->expected_tx_seq = __next_seq(chan,
6091 control->txseq);
6092
6093 chan->buffer_seq = chan->expected_tx_seq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006094 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006095
6096 err = l2cap_reassemble_sdu(chan, skb, control);
6097 if (err)
6098 break;
6099
6100 if (control->final) {
6101 if (!test_and_clear_bit(CONN_REJ_ACT,
6102 &chan->conn_state)) {
6103 control->final = 0;
6104 l2cap_retransmit_all(chan, control);
6105 l2cap_ertm_send(chan);
6106 }
6107 }
6108
6109 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6110 l2cap_send_ack(chan);
6111 break;
6112 case L2CAP_TXSEQ_UNEXPECTED:
6113 l2cap_pass_to_tx(chan, control);
6114
6115 /* Can't issue SREJ frames in the local busy state.
6116			 * Drop this frame; it will be seen as missing
6117 * when local busy is exited.
6118 */
6119 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6120 BT_DBG("Busy, discarding unexpected seq %d",
6121 control->txseq);
6122 break;
6123 }
6124
6125 /* There was a gap in the sequence, so an SREJ
6126 * must be sent for each missing frame. The
6127 * current frame is stored for later use.
6128 */
6129 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006130 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006131 BT_DBG("Queued %p (queue len %d)", skb,
6132 skb_queue_len(&chan->srej_q));
6133
6134 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6135 l2cap_seq_list_clear(&chan->srej_list);
6136 l2cap_send_srej(chan, control->txseq);
6137
6138 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6139 break;
6140 case L2CAP_TXSEQ_DUPLICATE:
6141 l2cap_pass_to_tx(chan, control);
6142 break;
6143 case L2CAP_TXSEQ_INVALID_IGNORE:
6144 break;
6145 case L2CAP_TXSEQ_INVALID:
6146 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006147 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006148 break;
6149 }
6150 break;
6151 case L2CAP_EV_RECV_RR:
6152 l2cap_pass_to_tx(chan, control);
6153 if (control->final) {
6154 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6155
Mat Martineaue6a3ee62012-10-23 15:24:22 -07006156 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6157 !__chan_is_moving(chan)) {
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006158 control->final = 0;
6159 l2cap_retransmit_all(chan, control);
6160 }
6161
6162 l2cap_ertm_send(chan);
6163 } else if (control->poll) {
6164 l2cap_send_i_or_rr_or_rnr(chan);
6165 } else {
6166 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6167 &chan->conn_state) &&
6168 chan->unacked_frames)
6169 __set_retrans_timer(chan);
6170
6171 l2cap_ertm_send(chan);
6172 }
6173 break;
6174 case L2CAP_EV_RECV_RNR:
6175 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6176 l2cap_pass_to_tx(chan, control);
6177 if (control && control->poll) {
6178 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6179 l2cap_send_rr_or_rnr(chan, 0);
6180 }
6181 __clear_retrans_timer(chan);
6182 l2cap_seq_list_clear(&chan->retrans_list);
6183 break;
6184 case L2CAP_EV_RECV_REJ:
6185 l2cap_handle_rej(chan, control);
6186 break;
6187 case L2CAP_EV_RECV_SREJ:
6188 l2cap_handle_srej(chan, control);
6189 break;
6190 default:
6191 break;
6192 }
6193
6194 if (skb && !skb_in_use) {
6195 BT_DBG("Freeing %p", skb);
6196 kfree_skb(skb);
6197 }
6198
6199 return err;
6200}
6201
6202static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6203 struct l2cap_ctrl *control,
6204 struct sk_buff *skb, u8 event)
6205{
6206 int err = 0;
6207 u16 txseq = control->txseq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006208 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006209
6210 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6211 event);
6212
6213 switch (event) {
6214 case L2CAP_EV_RECV_IFRAME:
6215 switch (l2cap_classify_txseq(chan, txseq)) {
6216 case L2CAP_TXSEQ_EXPECTED:
6217 /* Keep frame for reassembly later */
6218 l2cap_pass_to_tx(chan, control);
6219 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006220 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006221 BT_DBG("Queued %p (queue len %d)", skb,
6222 skb_queue_len(&chan->srej_q));
6223
6224 chan->expected_tx_seq = __next_seq(chan, txseq);
6225 break;
6226 case L2CAP_TXSEQ_EXPECTED_SREJ:
6227 l2cap_seq_list_pop(&chan->srej_list);
6228
6229 l2cap_pass_to_tx(chan, control);
6230 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006231 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006232 BT_DBG("Queued %p (queue len %d)", skb,
6233 skb_queue_len(&chan->srej_q));
6234
6235 err = l2cap_rx_queued_iframes(chan);
6236 if (err)
6237 break;
6238
6239 break;
6240 case L2CAP_TXSEQ_UNEXPECTED:
6241 /* Got a frame that can't be reassembled yet.
6242 * Save it for later, and send SREJs to cover
6243 * the missing frames.
6244 */
6245 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006246 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006247 BT_DBG("Queued %p (queue len %d)", skb,
6248 skb_queue_len(&chan->srej_q));
6249
6250 l2cap_pass_to_tx(chan, control);
6251 l2cap_send_srej(chan, control->txseq);
6252 break;
6253 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6254 /* This frame was requested with an SREJ, but
6255 * some expected retransmitted frames are
6256 * missing. Request retransmission of missing
6257 * SREJ'd frames.
6258 */
6259 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006260 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006261 BT_DBG("Queued %p (queue len %d)", skb,
6262 skb_queue_len(&chan->srej_q));
6263
6264 l2cap_pass_to_tx(chan, control);
6265 l2cap_send_srej_list(chan, control->txseq);
6266 break;
6267 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6268 /* We've already queued this frame. Drop this copy. */
6269 l2cap_pass_to_tx(chan, control);
6270 break;
6271 case L2CAP_TXSEQ_DUPLICATE:
6272 /* Expecting a later sequence number, so this frame
6273 * was already received. Ignore it completely.
6274 */
6275 break;
6276 case L2CAP_TXSEQ_INVALID_IGNORE:
6277 break;
6278 case L2CAP_TXSEQ_INVALID:
6279 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006280 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006281 break;
6282 }
6283 break;
6284 case L2CAP_EV_RECV_RR:
6285 l2cap_pass_to_tx(chan, control);
6286 if (control->final) {
6287 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6288
6289 if (!test_and_clear_bit(CONN_REJ_ACT,
6290 &chan->conn_state)) {
6291 control->final = 0;
6292 l2cap_retransmit_all(chan, control);
6293 }
6294
6295 l2cap_ertm_send(chan);
6296 } else if (control->poll) {
6297 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6298 &chan->conn_state) &&
6299 chan->unacked_frames) {
6300 __set_retrans_timer(chan);
6301 }
6302
6303 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6304 l2cap_send_srej_tail(chan);
6305 } else {
6306 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6307 &chan->conn_state) &&
6308 chan->unacked_frames)
6309 __set_retrans_timer(chan);
6310
6311 l2cap_send_ack(chan);
6312 }
6313 break;
6314 case L2CAP_EV_RECV_RNR:
6315 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6316 l2cap_pass_to_tx(chan, control);
6317 if (control->poll) {
6318 l2cap_send_srej_tail(chan);
6319 } else {
6320 struct l2cap_ctrl rr_control;
6321 memset(&rr_control, 0, sizeof(rr_control));
6322 rr_control.sframe = 1;
6323 rr_control.super = L2CAP_SUPER_RR;
6324 rr_control.reqseq = chan->buffer_seq;
6325 l2cap_send_sframe(chan, &rr_control);
6326 }
6327
6328 break;
6329 case L2CAP_EV_RECV_REJ:
6330 l2cap_handle_rej(chan, control);
6331 break;
6332 case L2CAP_EV_RECV_SREJ:
6333 l2cap_handle_srej(chan, control);
6334 break;
6335 }
6336
6337 if (skb && !skb_in_use) {
6338 BT_DBG("Freeing %p", skb);
6339 kfree_skb(skb);
6340 }
6341
6342 return err;
6343}
6344
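/* Finish a channel move: return to the RECV state, switch to the MTU
 * of the new transport and resegment any pending outgoing data.
 */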
Mat Martineau32b32732012-10-23 15:24:11 -07006345static int l2cap_finish_move(struct l2cap_chan *chan)
6346{
6347 BT_DBG("chan %p", chan);
6348
6349 chan->rx_state = L2CAP_RX_STATE_RECV;
6350
6351 if (chan->hs_hcon)
6352 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6353 else
6354 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6355
6356 return l2cap_resegment(chan);
6357}
6358
6359static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6360 struct l2cap_ctrl *control,
6361 struct sk_buff *skb, u8 event)
6362{
6363 int err;
6364
6365 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6366 event);
6367
6368 if (!control->poll)
6369 return -EPROTO;
6370
6371 l2cap_process_reqseq(chan, control->reqseq);
6372
6373 if (!skb_queue_empty(&chan->tx_q))
6374 chan->tx_send_head = skb_peek(&chan->tx_q);
6375 else
6376 chan->tx_send_head = NULL;
6377
6378 /* Rewind next_tx_seq to the point expected
6379 * by the receiver.
6380 */
6381 chan->next_tx_seq = control->reqseq;
6382 chan->unacked_frames = 0;
6383
6384 err = l2cap_finish_move(chan);
6385 if (err)
6386 return err;
6387
6388 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6389 l2cap_send_i_or_rr_or_rnr(chan);
6390
6391 if (event == L2CAP_EV_RECV_IFRAME)
6392 return -EPROTO;
6393
6394 return l2cap_rx_state_recv(chan, control, NULL, event);
6395}
6396
6397static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6398 struct l2cap_ctrl *control,
6399 struct sk_buff *skb, u8 event)
6400{
6401 int err;
6402
6403 if (!control->final)
6404 return -EPROTO;
6405
6406 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6407
6408 chan->rx_state = L2CAP_RX_STATE_RECV;
6409 l2cap_process_reqseq(chan, control->reqseq);
6410
6411 if (!skb_queue_empty(&chan->tx_q))
6412 chan->tx_send_head = skb_peek(&chan->tx_q);
6413 else
6414 chan->tx_send_head = NULL;
6415
6416 /* Rewind next_tx_seq to the point expected
6417 * by the receiver.
6418 */
6419 chan->next_tx_seq = control->reqseq;
6420 chan->unacked_frames = 0;
6421
6422 if (chan->hs_hcon)
6423 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6424 else
6425 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6426
6427 err = l2cap_resegment(chan);
6428
6429 if (!err)
6430 err = l2cap_rx_state_recv(chan, control, skb, event);
6431
6432 return err;
6433}
6434
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006435static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6436{
6437 /* Make sure reqseq is for a packet that has been sent but not acked */
6438 u16 unacked;
6439
6440 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6441 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6442}
6443
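/* Entry point of the ERTM receive state machine: validate the ReqSeq
 * and dispatch to the handler for the current receive state.
 */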
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006444static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6445 struct sk_buff *skb, u8 event)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006446{
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006447 int err = 0;
6448
6449 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6450 control, skb, event, chan->rx_state);
6451
6452 if (__valid_reqseq(chan, control->reqseq)) {
6453 switch (chan->rx_state) {
6454 case L2CAP_RX_STATE_RECV:
6455 err = l2cap_rx_state_recv(chan, control, skb, event);
6456 break;
6457 case L2CAP_RX_STATE_SREJ_SENT:
6458 err = l2cap_rx_state_srej_sent(chan, control, skb,
6459 event);
6460 break;
Mat Martineau32b32732012-10-23 15:24:11 -07006461 case L2CAP_RX_STATE_WAIT_P:
6462 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6463 break;
6464 case L2CAP_RX_STATE_WAIT_F:
6465 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6466 break;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006467 default:
6468 /* shut it down */
6469 break;
6470 }
6471 } else {
6472		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6473 control->reqseq, chan->next_tx_seq,
6474 chan->expected_ack_seq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006475 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006476 }
6477
6478 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006479}
6480
6481static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6482 struct sk_buff *skb)
6483{
Mat Martineau4b51dae92012-05-17 20:53:37 -07006484 int err = 0;
6485
6486 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6487 chan->rx_state);
6488
6489 if (l2cap_classify_txseq(chan, control->txseq) ==
6490 L2CAP_TXSEQ_EXPECTED) {
6491 l2cap_pass_to_tx(chan, control);
6492
6493 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6494 __next_seq(chan, chan->buffer_seq));
6495
6496 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6497
6498 l2cap_reassemble_sdu(chan, skb, control);
6499 } else {
6500 if (chan->sdu) {
6501 kfree_skb(chan->sdu);
6502 chan->sdu = NULL;
6503 }
6504 chan->sdu_last_frag = NULL;
6505 chan->sdu_len = 0;
6506
6507 if (skb) {
6508 BT_DBG("Freeing %p", skb);
6509 kfree_skb(skb);
6510 }
6511 }
6512
6513 chan->last_acked_seq = control->txseq;
6514 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6515
6516 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006517}
6518
6519static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6520{
6521 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6522 u16 len;
6523 u8 event;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006524
Mat Martineaub76bbd62012-04-11 10:48:43 -07006525 __unpack_control(chan, skb);
6526
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006527 len = skb->len;
6528
6529 /*
6530 * We can just drop the corrupted I-frame here.
6531	 * The receiver will miss it and start the proper recovery
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006532 * procedures and ask for retransmission.
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006533 */
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006534 if (l2cap_check_fcs(chan, skb))
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006535 goto drop;
6536
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006537 if (!control->sframe && control->sar == L2CAP_SAR_START)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006538 len -= L2CAP_SDULEN_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006539
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006540 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006541 len -= L2CAP_FCS_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006542
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006543 if (len > chan->mps) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006544 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006545 goto drop;
6546 }
6547
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006548 if (!control->sframe) {
6549 int err;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006550
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006551 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6552 control->sar, control->reqseq, control->final,
6553 control->txseq);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03006554
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006555 /* Validate F-bit - F=0 always valid, F=1 only
6556 * valid in TX WAIT_F
6557 */
6558 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006559 goto drop;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006560
6561 if (chan->mode != L2CAP_MODE_STREAMING) {
6562 event = L2CAP_EV_RECV_IFRAME;
6563 err = l2cap_rx(chan, control, skb, event);
6564 } else {
6565 err = l2cap_stream_rx(chan, control, skb);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006566 }
6567
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006568 if (err)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006569 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006570 } else {
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006571 const u8 rx_func_to_event[4] = {
6572 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6573 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6574 };
6575
6576 /* Only I-frames are expected in streaming mode */
6577 if (chan->mode == L2CAP_MODE_STREAMING)
6578 goto drop;
6579
6580 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6581 control->reqseq, control->final, control->poll,
6582 control->super);
6583
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006584 if (len != 0) {
Andrei Emeltchenko1bb166e2012-11-20 17:16:21 +02006585 BT_ERR("Trailing bytes: %d in sframe", len);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006586 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006587 goto drop;
6588 }
6589
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006590 /* Validate F and P bits */
6591 if (control->final && (control->poll ||
6592 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6593 goto drop;
6594
6595 event = rx_func_to_event[control->super];
6596 if (l2cap_rx(chan, control, skb, event))
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006597 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006598 }
6599
6600 return 0;
6601
6602drop:
6603 kfree_skb(skb);
6604 return 0;
6605}
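
/* Illustrative, standalone sketch (not part of l2cap_core.c): the MPS check in
 * l2cap_data_rcv() above counts only the information payload, i.e. skb->len
 * minus the optional 2-byte SDU length field (present only in a "SAR start"
 * I-frame) and minus the optional 2-byte CRC16 FCS.  The helper below, with
 * an invented name, restates that bookkeeping.
 */
#include <stdbool.h>
#include <stdint.h>

#define SDULEN_SIZE	2
#define FCS_SIZE	2

static bool iframe_payload_ok(uint16_t skb_len, bool sar_start, bool has_fcs,
			      uint16_t mps)
{
	uint16_t len = skb_len;

	if (sar_start)
		len -= SDULEN_SIZE;	/* SDU length header, first fragment only */
	if (has_fcs)
		len -= FCS_SIZE;	/* trailing CRC16 */

	return len <= mps;		/* oversized frames trigger a disconnect */
}

int main(void)
{
	/* 100-byte first fragment with FCS against an MPS of 96: 100 - 2 - 2 = 96 */
	return iframe_payload_ok(100, true, true, 96) ? 0 : 1;
}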
6606
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006607static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6608{
6609 struct l2cap_conn *conn = chan->conn;
6610 struct l2cap_le_credits pkt;
6611 u16 return_credits;
6612
6613 /* We return more credits to the sender only after the amount of
6614 * credits falls below half of the initial amount.
6615 */
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006616 if (chan->rx_credits >= (le_max_credits + 1) / 2)
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006617 return;
6618
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006619 return_credits = le_max_credits - chan->rx_credits;
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006620
6621 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6622
6623 chan->rx_credits += return_credits;
6624
6625 pkt.cid = cpu_to_le16(chan->scid);
6626 pkt.credits = cpu_to_le16(return_credits);
6627
6628 chan->ident = l2cap_get_ident(conn);
6629
6630 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6631}
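
/* Illustrative, standalone sketch (not part of l2cap_core.c): the policy above
 * hands credits back only once the receive credit count drops below half of
 * the configured maximum (le_max_credits) and then refills the channel to
 * that maximum with a single L2CAP_LE_CREDITS packet.  The function and
 * parameter names below are invented for the example.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t credits_to_return(uint16_t rx_credits, uint16_t max_credits)
{
	if (rx_credits >= (uint16_t)((max_credits + 1) / 2))
		return 0;			/* still plenty left, stay quiet */

	return max_credits - rx_credits;	/* refill to the maximum */
}

int main(void)
{
	uint16_t c;

	for (c = 10; c-- > 0; )			/* consume credits one by one */
		printf("rx_credits %2u -> return %2u\n", c,
		       credits_to_return(c, 10));
	return 0;
}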
6632
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006633static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6634{
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006635 int err;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006636
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006637 if (!chan->rx_credits) {
6638 BT_ERR("No credits to receive LE L2CAP data");
Johan Hedbergdfd97742014-01-27 15:11:34 -08006639 l2cap_send_disconn_req(chan, ECONNRESET);
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006640 return -ENOBUFS;
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006641 }
6642
6643 if (chan->imtu < skb->len) {
6644 BT_ERR("Too big LE L2CAP PDU");
6645 return -ENOBUFS;
6646 }
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006647
6648 chan->rx_credits--;
6649 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6650
6651 l2cap_chan_le_send_credits(chan);
6652
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006653 err = 0;
6654
6655 if (!chan->sdu) {
6656 u16 sdu_len;
6657
6658 sdu_len = get_unaligned_le16(skb->data);
6659 skb_pull(skb, L2CAP_SDULEN_SIZE);
6660
6661 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6662 sdu_len, skb->len, chan->imtu);
6663
6664 if (sdu_len > chan->imtu) {
6665 BT_ERR("Too big LE L2CAP SDU length received");
6666 err = -EMSGSIZE;
6667 goto failed;
6668 }
6669
6670 if (skb->len > sdu_len) {
6671 BT_ERR("Too much LE L2CAP data received");
6672 err = -EINVAL;
6673 goto failed;
6674 }
6675
6676 if (skb->len == sdu_len)
6677 return chan->ops->recv(chan, skb);
6678
6679 chan->sdu = skb;
6680 chan->sdu_len = sdu_len;
6681 chan->sdu_last_frag = skb;
6682
6683 return 0;
6684 }
6685
6686 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6687 chan->sdu->len, skb->len, chan->sdu_len);
6688
6689 if (chan->sdu->len + skb->len > chan->sdu_len) {
6690 BT_ERR("Too much LE L2CAP data received");
6691 err = -EINVAL;
6692 goto failed;
6693 }
6694
6695 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6696 skb = NULL;
6697
6698 if (chan->sdu->len == chan->sdu_len) {
6699 err = chan->ops->recv(chan, chan->sdu);
6700 if (!err) {
6701 chan->sdu = NULL;
6702 chan->sdu_last_frag = NULL;
6703 chan->sdu_len = 0;
6704 }
6705 }
6706
6707failed:
6708 if (err) {
6709 kfree_skb(skb);
6710 kfree_skb(chan->sdu);
6711 chan->sdu = NULL;
6712 chan->sdu_last_frag = NULL;
6713 chan->sdu_len = 0;
6714 }
6715
6716 /* We can't return an error here since we took care of the skb
6717 * freeing internally. An error return would cause the caller to
6718 * do a double-free of the skb.
6719 */
6720 return 0;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006721}
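
/* Illustrative, standalone sketch (not part of l2cap_core.c): with LE
 * credit-based flow control the first PDU of an SDU starts with a
 * little-endian 16-bit SDU length.  l2cap_le_data_rcv() above rejects SDUs
 * larger than the local MTU and fragments carrying more data than announced,
 * then keeps appending fragments until exactly sdu_len bytes have arrived.
 * The parser below restates only the first-fragment checks; the struct and
 * function names are invented for the example.
 */
#include <stddef.h>
#include <stdint.h>

struct le_sdu {
	uint16_t sdu_len;	/* announced total SDU length */
	uint16_t received;	/* payload bytes collected so far */
};

/* Returns 1 when the SDU is already complete, 0 when more fragments are
 * expected, and -1 on a protocol error (cf. -EMSGSIZE/-EINVAL above). */
static int le_sdu_first_fragment(struct le_sdu *sdu, const uint8_t *pdu,
				 size_t pdu_len, uint16_t imtu)
{
	if (pdu_len < 2)
		return -1;

	sdu->sdu_len = (uint16_t)(pdu[0] | (pdu[1] << 8));	/* LE 16-bit */
	pdu_len -= 2;

	if (sdu->sdu_len > imtu)	/* SDU bigger than we can accept */
		return -1;
	if (pdu_len > sdu->sdu_len)	/* more data than announced */
		return -1;

	sdu->received = (uint16_t)pdu_len;
	return sdu->received == sdu->sdu_len;
}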
6722
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006723static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6724 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006725{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006726 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006727
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03006728 chan = l2cap_get_chan_by_scid(conn, cid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006729 if (!chan) {
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006730 if (cid == L2CAP_CID_A2MP) {
6731 chan = a2mp_channel_create(conn, skb);
6732 if (!chan) {
6733 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006734 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006735 }
6736
6737 l2cap_chan_lock(chan);
6738 } else {
6739 BT_DBG("unknown cid 0x%4.4x", cid);
6740 /* Drop packet and return */
6741 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006742 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006743 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006744 }
6745
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03006746 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006747
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03006748 if (chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006749 goto drop;
6750
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006751 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03006752 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006753 if (l2cap_le_data_rcv(chan, skb) < 0)
6754 goto drop;
6755
6756 goto done;
6757
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006758 case L2CAP_MODE_BASIC:
6759		/* If the socket receive buffer overflows we drop data here,
6760		 * which is *bad* because L2CAP has to be reliable.
6761		 * But we don't have any other choice: L2CAP doesn't
6762		 * provide a flow control mechanism. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006763
Szymon Janc2c96e032014-02-18 20:48:34 +01006764 if (chan->imtu < skb->len) {
6765 BT_ERR("Dropping L2CAP data: receive buffer overflow");
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006766 goto drop;
Szymon Janc2c96e032014-02-18 20:48:34 +01006767 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006768
Gustavo Padovan80b98022012-05-27 22:27:51 -03006769 if (!chan->ops->recv(chan, skb))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006770 goto done;
6771 break;
6772
6773 case L2CAP_MODE_ERTM:
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006774 case L2CAP_MODE_STREAMING:
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006775 l2cap_data_rcv(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006776 goto done;
6777
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006778 default:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006779 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006780 break;
6781 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006782
6783drop:
6784 kfree_skb(skb);
6785
6786done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02006787 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006788}
6789
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006790static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6791 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006792{
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006793 struct hci_conn *hcon = conn->hcon;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006794 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006795
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006796 if (hcon->type != ACL_LINK)
Johan Hedberga24cce12014-08-07 22:56:42 +03006797 goto free_skb;
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006798
Johan Hedbergbf20fd42013-05-14 13:23:13 +03006799 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6800 ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006801 if (!chan)
Johan Hedberga24cce12014-08-07 22:56:42 +03006802 goto free_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006803
Andrei Emeltchenko5b4ceda2012-02-24 16:35:32 +02006804 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006805
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03006806 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006807 goto drop;
6808
Vinicius Costa Gomese13e21d2011-06-17 22:46:27 -03006809 if (chan->imtu < skb->len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006810 goto drop;
6811
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006812 /* Store remote BD_ADDR and PSM for msg_name */
Marcel Holtmann06ae3312013-10-18 03:43:00 -07006813 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006814 bt_cb(skb)->psm = psm;
6815
Johan Hedberga24cce12014-08-07 22:56:42 +03006816 if (!chan->ops->recv(chan, skb)) {
6817 l2cap_chan_put(chan);
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006818 return;
Johan Hedberga24cce12014-08-07 22:56:42 +03006819 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006820
6821drop:
Johan Hedberga24cce12014-08-07 22:56:42 +03006822 l2cap_chan_put(chan);
6823free_skb:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006824 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006825}
6826
6827static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6828{
6829 struct l2cap_hdr *lh = (void *) skb->data;
Johan Hedberg61a939c2014-01-17 20:45:11 +02006830 struct hci_conn *hcon = conn->hcon;
Al Viro8e036fc2007-07-29 00:16:36 -07006831 u16 cid, len;
6832 __le16 psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006833
Johan Hedberg61a939c2014-01-17 20:45:11 +02006834 if (hcon->state != BT_CONNECTED) {
6835 BT_DBG("queueing pending rx skb");
6836 skb_queue_tail(&conn->pending_rx, skb);
6837 return;
6838 }
6839
Linus Torvalds1da177e2005-04-16 15:20:36 -07006840 skb_pull(skb, L2CAP_HDR_SIZE);
6841 cid = __le16_to_cpu(lh->cid);
6842 len = __le16_to_cpu(lh->len);
6843
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006844 if (len != skb->len) {
6845 kfree_skb(skb);
6846 return;
6847 }
6848
Johan Hedberg9e1d7e12014-07-06 11:03:36 +03006849 /* Since we can't actively block incoming LE connections we must
6850 * at least ensure that we ignore incoming data from them.
6851 */
6852 if (hcon->type == LE_LINK &&
Johan Hedbergdcc36c12014-07-09 12:59:13 +03006853 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6854 bdaddr_type(hcon, hcon->dst_type))) {
Johan Hedberge4931502014-07-02 09:36:21 +03006855 kfree_skb(skb);
6856 return;
6857 }
6858
Linus Torvalds1da177e2005-04-16 15:20:36 -07006859 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6860
6861 switch (cid) {
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03006862 case L2CAP_CID_SIGNALING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006863 l2cap_sig_channel(conn, skb);
6864 break;
6865
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03006866 case L2CAP_CID_CONN_LESS:
Andrei Emeltchenko097db762012-03-09 14:16:17 +02006867 psm = get_unaligned((__le16 *) skb->data);
Andrei Emeltchenko0181a702012-05-29 10:04:05 +03006868 skb_pull(skb, L2CAP_PSMLEN_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006869 l2cap_conless_channel(conn, psm, skb);
6870 break;
6871
Marcel Holtmanna2877622013-10-02 23:46:54 -07006872 case L2CAP_CID_LE_SIGNALING:
6873 l2cap_le_sig_channel(conn, skb);
6874 break;
6875
Linus Torvalds1da177e2005-04-16 15:20:36 -07006876 default:
6877 l2cap_data_channel(conn, cid, skb);
6878 break;
6879 }
6880}
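
/* Illustrative, standalone sketch (not part of l2cap_core.c): every frame
 * handled by l2cap_recv_frame() above starts with the 4-byte Basic L2CAP
 * header, a little-endian payload length followed by the destination CID,
 * and is dropped when the length field does not match the actual payload.
 * The CID then selects the handler (0x0001 signaling, 0x0002 connectionless
 * with a leading PSM, 0x0005 LE signaling, anything else a data channel).
 * The names below are invented for the example.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct basic_hdr {
	uint16_t len;
	uint16_t cid;
};

static int parse_basic_hdr(const uint8_t *buf, size_t buflen,
			   struct basic_hdr *hdr)
{
	if (buflen < 4)
		return -1;

	hdr->len = (uint16_t)(buf[0] | (buf[1] << 8));
	hdr->cid = (uint16_t)(buf[2] | (buf[3] << 8));

	/* the length field must cover exactly the bytes after the header */
	return (size_t)hdr->len == buflen - 4 ? 0 : -1;
}

int main(void)
{
	/* 2-byte payload destined for dynamic CID 0x0040 */
	const uint8_t frame[] = { 0x02, 0x00, 0x40, 0x00, 0xde, 0xad };
	struct basic_hdr h;

	if (!parse_basic_hdr(frame, sizeof(frame), &h))
		printf("len %u cid 0x%4.4x\n", h.len, h.cid);
	return 0;
}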
6881
Johan Hedberg61a939c2014-01-17 20:45:11 +02006882static void process_pending_rx(struct work_struct *work)
6883{
6884 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6885 pending_rx_work);
6886 struct sk_buff *skb;
6887
6888 BT_DBG("");
6889
6890 while ((skb = skb_dequeue(&conn->pending_rx)))
6891 l2cap_recv_frame(conn, skb);
6892}
6893
Johan Hedberg162b49e2014-01-17 20:45:10 +02006894static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6895{
6896 struct l2cap_conn *conn = hcon->l2cap_data;
6897 struct hci_chan *hchan;
6898
6899 if (conn)
6900 return conn;
6901
6902 hchan = hci_chan_create(hcon);
6903 if (!hchan)
6904 return NULL;
6905
Johan Hedberg27f70f32014-07-21 10:50:06 +03006906 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006907 if (!conn) {
6908 hci_chan_del(hchan);
6909 return NULL;
6910 }
6911
6912 kref_init(&conn->ref);
6913 hcon->l2cap_data = conn;
Johan Hedberg51bb84572014-08-15 21:06:57 +03006914 conn->hcon = hci_conn_get(hcon);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006915 conn->hchan = hchan;
6916
6917 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6918
6919 switch (hcon->type) {
6920 case LE_LINK:
6921 if (hcon->hdev->le_mtu) {
6922 conn->mtu = hcon->hdev->le_mtu;
6923 break;
6924 }
6925 /* fall through */
6926 default:
6927 conn->mtu = hcon->hdev->acl_mtu;
6928 break;
6929 }
6930
6931 conn->feat_mask = 0;
6932
6933 if (hcon->type == ACL_LINK)
6934 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6935 &hcon->hdev->dev_flags);
6936
Marcel Holtmann5a54e7c2014-07-13 20:50:15 +02006937 mutex_init(&conn->ident_lock);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006938 mutex_init(&conn->chan_lock);
6939
6940 INIT_LIST_HEAD(&conn->chan_l);
6941 INIT_LIST_HEAD(&conn->users);
6942
Johan Hedberg276d8072014-08-11 22:06:41 +03006943 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006944
Johan Hedberg61a939c2014-01-17 20:45:11 +02006945 skb_queue_head_init(&conn->pending_rx);
6946 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
Johan Hedbergf3d82d02014-09-05 22:19:50 +03006947 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
Johan Hedberg61a939c2014-01-17 20:45:11 +02006948
Johan Hedberg162b49e2014-01-17 20:45:10 +02006949 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
6950
6951 return conn;
6952}
6953
6954static bool is_valid_psm(u16 psm, u8 dst_type)
{
6955 if (!psm)
6956 return false;
6957
6958 if (bdaddr_type_is_le(dst_type))
6959 return (psm <= 0x00ff);
6960
6961 /* PSM must be odd and lsb of upper byte must be 0 */
6962 return ((psm & 0x0101) == 0x0001);
6963}
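
/* Illustrative, standalone sketch (not part of l2cap_core.c): is_valid_psm()
 * above accepts any non-zero LE PSM up to 0x00ff, while a BR/EDR PSM must be
 * odd with bit 0 of its upper byte clear, which is what the
 * (psm & 0x0101) == 0x0001 test encodes.  The demo below just exercises that
 * mask on a few sample values; its names are invented for the example.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool bredr_psm_ok(uint16_t psm)
{
	return psm && (psm & 0x0101) == 0x0001;
}

int main(void)
{
	const uint16_t samples[] = { 0x0001, 0x0003, 0x0021, 0x0002, 0x0101 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("psm 0x%4.4x -> %s\n", samples[i],
		       bredr_psm_ok(samples[i]) ? "valid" : "invalid");
	return 0;
}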
6964
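/* Connect @chan to @psm (or to a fixed/dynamic @cid) on the remote device
 * @dst of address type @dst_type: the routing HCI device is looked up with
 * hci_get_route(), an LE or BR/EDR link is created (or reused) as needed,
 * the channel is attached to the resulting l2cap_conn and the connect or
 * security sequence is started.  Returns 0 on success or when a connection
 * attempt is already in progress, a negative error code otherwise.
 */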
6965int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6966 bdaddr_t *dst, u8 dst_type)
6967{
6968 struct l2cap_conn *conn;
6969 struct hci_conn *hcon;
6970 struct hci_dev *hdev;
Johan Hedberg162b49e2014-01-17 20:45:10 +02006971 int err;
6972
6973 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
6974 dst_type, __le16_to_cpu(psm));
6975
6976 hdev = hci_get_route(dst, &chan->src);
6977 if (!hdev)
6978 return -EHOSTUNREACH;
6979
6980 hci_dev_lock(hdev);
6981
Johan Hedberg162b49e2014-01-17 20:45:10 +02006982 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
6983 chan->chan_type != L2CAP_CHAN_RAW) {
6984 err = -EINVAL;
6985 goto done;
6986 }
6987
Johan Hedberg21626e62014-01-24 10:35:41 +02006988 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
6989 err = -EINVAL;
6990 goto done;
6991 }
6992
6993 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
Johan Hedberg162b49e2014-01-17 20:45:10 +02006994 err = -EINVAL;
6995 goto done;
6996 }
6997
6998 switch (chan->mode) {
6999 case L2CAP_MODE_BASIC:
7000 break;
7001 case L2CAP_MODE_LE_FLOWCTL:
7002 l2cap_le_flowctl_init(chan);
7003 break;
7004 case L2CAP_MODE_ERTM:
7005 case L2CAP_MODE_STREAMING:
7006 if (!disable_ertm)
7007 break;
7008 /* fall through */
7009 default:
Johan Hedbergbeb19e42014-07-18 11:15:26 +03007010 err = -EOPNOTSUPP;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007011 goto done;
7012 }
7013
7014 switch (chan->state) {
7015 case BT_CONNECT:
7016 case BT_CONNECT2:
7017 case BT_CONFIG:
7018 /* Already connecting */
7019 err = 0;
7020 goto done;
7021
7022 case BT_CONNECTED:
7023 /* Already connected */
7024 err = -EISCONN;
7025 goto done;
7026
7027 case BT_OPEN:
7028 case BT_BOUND:
7029 /* Can connect */
7030 break;
7031
7032 default:
7033 err = -EBADFD;
7034 goto done;
7035 }
7036
7037 /* Set destination address and psm */
7038 bacpy(&chan->dst, dst);
7039 chan->dst_type = dst_type;
7040
7041 chan->psm = psm;
7042 chan->dcid = cid;
7043
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007044 if (bdaddr_type_is_le(dst_type)) {
Johan Hedberge804d252014-07-16 11:42:28 +03007045 u8 role;
Johan Hedbergcdd62752014-07-07 15:02:28 +03007046
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007047 /* Convert from L2CAP channel address type to HCI address type
7048 */
7049 if (dst_type == BDADDR_LE_PUBLIC)
7050 dst_type = ADDR_LE_DEV_PUBLIC;
7051 else
7052 dst_type = ADDR_LE_DEV_RANDOM;
7053
Johan Hedberge804d252014-07-16 11:42:28 +03007054 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7055 role = HCI_ROLE_SLAVE;
7056 else
7057 role = HCI_ROLE_MASTER;
Johan Hedbergcdd62752014-07-07 15:02:28 +03007058
Andre Guedes04a6c582014-02-26 20:21:44 -03007059 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
Johan Hedberge804d252014-07-16 11:42:28 +03007060 HCI_LE_CONN_TIMEOUT, role);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007061 } else {
Johan Hedbergd93375a2014-07-07 15:02:27 +03007062 u8 auth_type = l2cap_get_auth_type(chan);
Andre Guedes04a6c582014-02-26 20:21:44 -03007063 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007064 }
Johan Hedberg162b49e2014-01-17 20:45:10 +02007065
7066 if (IS_ERR(hcon)) {
7067 err = PTR_ERR(hcon);
7068 goto done;
7069 }
7070
7071 conn = l2cap_conn_add(hcon);
7072 if (!conn) {
7073 hci_conn_drop(hcon);
7074 err = -ENOMEM;
7075 goto done;
7076 }
7077
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007078 mutex_lock(&conn->chan_lock);
7079 l2cap_chan_lock(chan);
7080
Johan Hedberg162b49e2014-01-17 20:45:10 +02007081 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7082 hci_conn_drop(hcon);
7083 err = -EBUSY;
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007084 goto chan_unlock;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007085 }
7086
7087 /* Update source addr of the socket */
7088 bacpy(&chan->src, &hcon->src);
7089 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7090
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007091 __l2cap_chan_add(conn, chan);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007092
7093 /* l2cap_chan_add takes its own ref so we can drop this one */
7094 hci_conn_drop(hcon);
7095
7096 l2cap_state_change(chan, BT_CONNECT);
7097 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7098
Johan Hedberg61202e42014-01-28 15:16:48 -08007099 /* Release chan->sport so that it can be reused by other
7100 * sockets (as it's only used for listening sockets).
7101 */
7102 write_lock(&chan_list_lock);
7103 chan->sport = 0;
7104 write_unlock(&chan_list_lock);
7105
Johan Hedberg162b49e2014-01-17 20:45:10 +02007106 if (hcon->state == BT_CONNECTED) {
7107 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7108 __clear_chan_timer(chan);
Johan Hedberge7cafc42014-07-17 15:35:38 +03007109 if (l2cap_chan_check_security(chan, true))
Johan Hedberg162b49e2014-01-17 20:45:10 +02007110 l2cap_state_change(chan, BT_CONNECTED);
7111 } else
7112 l2cap_do_start(chan);
7113 }
7114
7115 err = 0;
7116
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007117chan_unlock:
Johan Hedberg162b49e2014-01-17 20:45:10 +02007118 l2cap_chan_unlock(chan);
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007119 mutex_unlock(&conn->chan_lock);
7120done:
Johan Hedberg162b49e2014-01-17 20:45:10 +02007121 hci_dev_unlock(hdev);
7122 hci_dev_put(hdev);
7123 return err;
7124}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03007125EXPORT_SYMBOL_GPL(l2cap_chan_connect);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007126
Linus Torvalds1da177e2005-04-16 15:20:36 -07007127/* ---- L2CAP interface with lower layer (HCI) ---- */
7128
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007129int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007130{
7131 int exact = 0, lm1 = 0, lm2 = 0;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007132 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007133
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007134 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007135
7136 /* Find listening sockets and check their link_mode */
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007137 read_lock(&chan_list_lock);
7138 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007139 if (c->state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007140 continue;
7141
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007142 if (!bacmp(&c->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007143 lm1 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007144 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007145 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007146 exact++;
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007147 } else if (!bacmp(&c->src, BDADDR_ANY)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007148 lm2 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007149 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007150 lm2 |= HCI_LM_MASTER;
7151 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007152 }
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007153 read_unlock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007154
7155 return exact ? lm1 : lm2;
7156}
7157
Johan Hedberge760ec12014-08-07 22:56:47 +03007158/* Find the next fixed channel in BT_LISTEN state, continue iteration
7159 * from an existing channel in the list or from the beginning of the
7160 * global list (by passing NULL as first parameter).
7161 */
7162static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
Johan Hedberg54a1b622014-08-07 22:56:48 +03007163 bdaddr_t *src, u8 link_type)
Johan Hedberge760ec12014-08-07 22:56:47 +03007164{
7165 read_lock(&chan_list_lock);
7166
7167 if (c)
7168 c = list_next_entry(c, global_l);
7169 else
7170 c = list_entry(chan_list.next, typeof(*c), global_l);
7171
7172 list_for_each_entry_from(c, &chan_list, global_l) {
7173 if (c->chan_type != L2CAP_CHAN_FIXED)
7174 continue;
7175 if (c->state != BT_LISTEN)
7176 continue;
7177 if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
7178 continue;
Johan Hedberg54a1b622014-08-07 22:56:48 +03007179 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7180 continue;
7181 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7182 continue;
Johan Hedberge760ec12014-08-07 22:56:47 +03007183
7184 l2cap_chan_hold(c);
7185 read_unlock(&chan_list_lock);
7186 return c;
7187 }
7188
7189 read_unlock(&chan_list_lock);
7190
7191 return NULL;
7192}
7193
Andrei Emeltchenko9e664632012-07-24 16:06:15 +03007194void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007195{
Johan Hedberge760ec12014-08-07 22:56:47 +03007196 struct hci_dev *hdev = hcon->hdev;
Marcel Holtmann01394182006-07-03 10:02:46 +02007197 struct l2cap_conn *conn;
Johan Hedberge760ec12014-08-07 22:56:47 +03007198 struct l2cap_chan *pchan;
7199 u8 dst_type;
Marcel Holtmann01394182006-07-03 10:02:46 +02007200
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007201 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007202
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007203 if (status) {
Joe Perchese1750722011-06-29 18:18:29 -07007204 l2cap_conn_del(hcon, bt_to_errno(status));
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007205 return;
Andrei Emeltchenkoba6fc312012-10-31 15:46:26 +02007206 }
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007207
7208 conn = l2cap_conn_add(hcon);
7209 if (!conn)
7210 return;
7211
Johan Hedberge760ec12014-08-07 22:56:47 +03007212 dst_type = bdaddr_type(hcon, hcon->dst_type);
7213
7214 /* If device is blocked, do not create channels for it */
7215 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7216 return;
7217
7218 /* Find fixed channels and notify them of the new connection. We
7219 * use multiple individual lookups, continuing each time where
7220 * we left off, because the list lock would prevent calling the
7221 * potentially sleeping l2cap_chan_lock() function.
7222 */
Johan Hedberg54a1b622014-08-07 22:56:48 +03007223 pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
Johan Hedberge760ec12014-08-07 22:56:47 +03007224 while (pchan) {
7225 struct l2cap_chan *chan, *next;
7226
7227 /* Client fixed channels should override server ones */
7228 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7229 goto next;
7230
7231 l2cap_chan_lock(pchan);
7232 chan = pchan->ops->new_connection(pchan);
7233 if (chan) {
7234 bacpy(&chan->src, &hcon->src);
7235 bacpy(&chan->dst, &hcon->dst);
7236 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7237 chan->dst_type = dst_type;
7238
7239 __l2cap_chan_add(conn, chan);
7240 }
7241
7242 l2cap_chan_unlock(pchan);
7243next:
Johan Hedberg54a1b622014-08-07 22:56:48 +03007244 next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
7245 hcon->type);
Johan Hedberge760ec12014-08-07 22:56:47 +03007246 l2cap_chan_put(pchan);
7247 pchan = next;
7248 }
7249
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007250 l2cap_conn_ready(conn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007251}
7252
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007253int l2cap_disconn_ind(struct hci_conn *hcon)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007254{
7255 struct l2cap_conn *conn = hcon->l2cap_data;
7256
7257 BT_DBG("hcon %p", hcon);
7258
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007259 if (!conn)
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02007260 return HCI_ERROR_REMOTE_USER_TERM;
Marcel Holtmann2950f212009-02-12 14:02:50 +01007261 return conn->disc_reason;
7262}
7263
Andrei Emeltchenko9e664632012-07-24 16:06:15 +03007264void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007265{
7266 BT_DBG("hcon %p reason %d", hcon, reason);
7267
Joe Perchese1750722011-06-29 18:18:29 -07007268 l2cap_conn_del(hcon, bt_to_errno(reason));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007269}
7270
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007271static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007272{
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03007273 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007274 return;
7275
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007276 if (encrypt == 0x00) {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007277 if (chan->sec_level == BT_SECURITY_MEDIUM) {
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007278 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
Marcel Holtmann7d513e92014-01-15 22:37:40 -08007279 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7280 chan->sec_level == BT_SECURITY_FIPS)
Gustavo F. Padovan0f852722011-05-04 19:42:50 -03007281 l2cap_chan_close(chan, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007282 } else {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007283 if (chan->sec_level == BT_SECURITY_MEDIUM)
Gustavo F. Padovanc9b66672011-05-17 14:59:01 -03007284 __clear_chan_timer(chan);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007285 }
7286}
7287
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007288int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007289{
Marcel Holtmann40be4922008-07-14 20:13:50 +02007290 struct l2cap_conn *conn = hcon->l2cap_data;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03007291 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007292
Marcel Holtmann01394182006-07-03 10:02:46 +02007293 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007294 return 0;
Marcel Holtmann01394182006-07-03 10:02:46 +02007295
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007296 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007297
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007298 mutex_lock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007299
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007300 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007301 l2cap_chan_lock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007302
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007303 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7304 state_to_string(chan->state));
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007305
Johan Hedberg2338a7e2014-01-24 10:35:40 +02007306 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko78eb2f92012-07-19 17:03:47 +03007307 l2cap_chan_unlock(chan);
7308 continue;
7309 }
7310
Johan Hedberg191eb392014-08-07 22:56:45 +03007311 if (!status && encrypt)
7312 chan->sec_level = hcon->sec_level;
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007313
Andrei Emeltchenko96eff462012-11-15 18:14:53 +02007314 if (!__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007315 l2cap_chan_unlock(chan);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01007316 continue;
7317 }
7318
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007319 if (!status && (chan->state == BT_CONNECTED ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01007320 chan->state == BT_CONFIG)) {
Marcel Holtmannd97c8992013-10-14 02:53:54 -07007321 chan->ops->resume(chan);
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007322 l2cap_check_encryption(chan, encrypt);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007323 l2cap_chan_unlock(chan);
Marcel Holtmann9719f8a2008-07-14 20:13:45 +02007324 continue;
7325 }
7326
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007327 if (chan->state == BT_CONNECT) {
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007328 if (!status)
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03007329 l2cap_start_connection(chan);
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007330 else
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007331 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007332 } else if (chan->state == BT_CONNECT2) {
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007333 struct l2cap_conn_rsp rsp;
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007334 __u16 res, stat;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007335
7336 if (!status) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07007337 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007338 res = L2CAP_CR_PEND;
7339 stat = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08007340 chan->ops->defer(chan);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007341 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007342 l2cap_state_change(chan, BT_CONFIG);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007343 res = L2CAP_CR_SUCCESS;
7344 stat = L2CAP_CS_NO_INFO;
7345 }
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007346 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007347 l2cap_state_change(chan, BT_DISCONN);
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007348 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007349 res = L2CAP_CR_SEC_BLOCK;
7350 stat = L2CAP_CS_NO_INFO;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007351 }
7352
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03007353 rsp.scid = cpu_to_le16(chan->dcid);
7354 rsp.dcid = cpu_to_le16(chan->scid);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007355 rsp.result = cpu_to_le16(res);
7356 rsp.status = cpu_to_le16(stat);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03007357 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01007358 sizeof(rsp), &rsp);
Mat Martineau2d369352012-05-23 14:59:30 -07007359
7360 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7361 res == L2CAP_CR_SUCCESS) {
7362 char buf[128];
7363 set_bit(CONF_REQ_SENT, &chan->conf_state);
7364 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7365 L2CAP_CONF_REQ,
7366 l2cap_build_conf_req(chan, buf),
7367 buf);
7368 chan->num_conf_req++;
7369 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007370 }
7371
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007372 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007373 }
7374
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007375 mutex_unlock(&conn->chan_lock);
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007376
Linus Torvalds1da177e2005-04-16 15:20:36 -07007377 return 0;
7378}
7379
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007380int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007381{
7382 struct l2cap_conn *conn = hcon->l2cap_data;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007383 struct l2cap_hdr *hdr;
7384 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007385
Andrei Emeltchenko1d13a252012-10-15 11:58:41 +03007386 /* For AMP controller do not create l2cap conn */
7387 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7388 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007389
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007390 if (!conn)
Claudio Takahasibaf43252013-04-11 13:55:50 -03007391 conn = l2cap_conn_add(hcon);
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007392
7393 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007394 goto drop;
7395
7396 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7397
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007398 switch (flags) {
7399 case ACL_START:
7400 case ACL_START_NO_FLUSH:
7401 case ACL_COMPLETE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007402 if (conn->rx_len) {
7403 BT_ERR("Unexpected start frame (len %d)", skb->len);
7404 kfree_skb(conn->rx_skb);
7405 conn->rx_skb = NULL;
7406 conn->rx_len = 0;
7407 l2cap_conn_unreliable(conn, ECOMM);
7408 }
7409
Andrei Emeltchenkoaae7fe22010-09-15 14:28:43 +03007410 /* Start fragment always begin with Basic L2CAP header */
7411 if (skb->len < L2CAP_HDR_SIZE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007412 BT_ERR("Frame is too short (len %d)", skb->len);
7413 l2cap_conn_unreliable(conn, ECOMM);
7414 goto drop;
7415 }
7416
7417 hdr = (struct l2cap_hdr *) skb->data;
7418 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7419
7420 if (len == skb->len) {
7421 /* Complete frame received */
7422 l2cap_recv_frame(conn, skb);
7423 return 0;
7424 }
7425
7426 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7427
7428 if (skb->len > len) {
7429 BT_ERR("Frame is too long (len %d, expected len %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007430 skb->len, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007431 l2cap_conn_unreliable(conn, ECOMM);
7432 goto drop;
7433 }
7434
7435 /* Allocate skb for the complete frame (with header) */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03007436 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03007437 if (!conn->rx_skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007438 goto drop;
7439
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007440 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007441 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007442 conn->rx_len = len - skb->len;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007443 break;
7444
7445 case ACL_CONT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007446 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7447
7448 if (!conn->rx_len) {
7449 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7450 l2cap_conn_unreliable(conn, ECOMM);
7451 goto drop;
7452 }
7453
7454 if (skb->len > conn->rx_len) {
7455 BT_ERR("Fragment is too long (len %d, expected %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007456 skb->len, conn->rx_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007457 kfree_skb(conn->rx_skb);
7458 conn->rx_skb = NULL;
7459 conn->rx_len = 0;
7460 l2cap_conn_unreliable(conn, ECOMM);
7461 goto drop;
7462 }
7463
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007464 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007465 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007466 conn->rx_len -= skb->len;
7467
7468 if (!conn->rx_len) {
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007469 /* Complete frame received. l2cap_recv_frame
7470			 * takes ownership of the skb, so set the connection's
7471 * rx_skb pointer to NULL first.
7472 */
7473 struct sk_buff *rx_skb = conn->rx_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007474 conn->rx_skb = NULL;
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007475 l2cap_recv_frame(conn, rx_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007476 }
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007477 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007478 }
7479
7480drop:
7481 kfree_skb(skb);
7482 return 0;
7483}
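
/* Illustrative, standalone sketch (not part of l2cap_core.c): HCI may deliver
 * one L2CAP frame split across several ACL packets.  l2cap_recv_acldata()
 * above learns the total frame size from the Basic L2CAP header in the start
 * fragment, appends continuation fragments until rx_len reaches zero, and
 * treats an unexpected start, a missing start or an oversized fragment as a
 * protocol error.  The reassembler below mirrors that control flow with
 * invented names and a fixed buffer instead of skbs.
 */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define HDR_SIZE	4
#define MAX_FRAME	1024

struct reasm {
	uint8_t buf[MAX_FRAME];
	size_t have;		/* bytes collected so far */
	size_t want;		/* total frame size, 0 when idle */
};

/* Returns 1 when a complete frame sits in r->buf, 0 when more fragments are
 * needed, -1 on error (the kernel marks the connection unreliable instead). */
static int reasm_add(struct reasm *r, bool start, const uint8_t *frag,
		     size_t len)
{
	if (start) {
		if (r->want || len < HDR_SIZE)
			return -1;	/* unexpected start or too short */

		r->want = (size_t)(frag[0] | (frag[1] << 8)) + HDR_SIZE;
		if (r->want > MAX_FRAME || len > r->want)
			return -1;	/* frame too big or longer than announced */
		r->have = 0;
	} else if (!r->want || r->have + len > r->want) {
		return -1;		/* no start seen or fragment too long */
	}

	memcpy(r->buf + r->have, frag, len);
	r->have += len;

	if (r->have < r->want)
		return 0;

	r->want = 0;			/* frame complete, ready for the next one */
	return 1;
}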
7484
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007485static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007486{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007487 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007488
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007489 read_lock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007490
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007491 list_for_each_entry(c, &chan_list, global_l) {
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007492 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007493 &c->src, &c->dst,
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007494 c->state, __le16_to_cpu(c->psm),
7495 c->scid, c->dcid, c->imtu, c->omtu,
7496 c->sec_level, c->mode);
Andrei Emeltchenko61e1b4b2012-01-19 11:19:50 +02007497 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007498
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007499 read_unlock(&chan_list_lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007500
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007501 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007502}
7503
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007504static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7505{
7506 return single_open(file, l2cap_debugfs_show, inode->i_private);
7507}
7508
7509static const struct file_operations l2cap_debugfs_fops = {
7510 .open = l2cap_debugfs_open,
7511 .read = seq_read,
7512 .llseek = seq_lseek,
7513 .release = single_release,
7514};
7515
7516static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007517
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007518int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007519{
7520 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007521
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007522 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007523 if (err < 0)
7524 return err;
7525
Marcel Holtmann1120e4b2013-10-17 17:24:16 -07007526 if (IS_ERR_OR_NULL(bt_debugfs))
7527 return 0;
7528
7529 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7530 NULL, &l2cap_debugfs_fops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007531
Samuel Ortiz40b93972014-05-14 17:53:35 +02007532 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007533 &le_max_credits);
Samuel Ortiz40b93972014-05-14 17:53:35 +02007534 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007535 &le_default_mps);
7536
Linus Torvalds1da177e2005-04-16 15:20:36 -07007537 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007538}
7539
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007540void l2cap_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007541{
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007542 debugfs_remove(l2cap_debugfs);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007543 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007544}
7545
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03007546module_param(disable_ertm, bool, 0644);
7547MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");