/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"
#include "amp.h"

#define LE_FLOWCTL_MAX_CREDITS 65535

bool disable_ertm;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);

static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
{
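	/* Map the HCI-level address type of this connection to the
	 * BDADDR_* constant exposed through the L2CAP socket interface.
	 */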
	if (hcon->type == LE_LINK) {
		if (type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

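	/* Scan the dynamically allocated CID range (starting at
	 * L2CAP_CID_DYN_START) for a source CID not yet used by any
	 * channel on this connection; LE links use the smaller dyn_end
	 * chosen above, as the spec reserves only a short dynamic range
	 * for LE connection-oriented channels.
	 */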
	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}

static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */
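
/* Illustrative example (not from the original source): with an
 * alloc_size of 16 (mask 0x000f), appending seq 5 and then seq 9
 * results in head == 5, list[5] == 9, list[9] == L2CAP_SEQ_LIST_TAIL
 * and tail == 9; a subsequent pop returns 5 and advances head to 9.
 */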

static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}

static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}

struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);

static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}

void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);

void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);

static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}

void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}

void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}

void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);

static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}

void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);

static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
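	/* Derive the HCI authentication requirement from the channel
	 * type, the PSM and the requested security level.
	 */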
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}

/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}

static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identifier.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}

static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}

static bool __chan_is_moving(struct l2cap_chan *chan)
{
	return chan->move_state != L2CAP_MOVE_STABLE &&
	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}

static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}

static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
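	/* Decode a 16-bit enhanced control field into the generic
	 * l2cap_ctrl representation shared with extended control mode.
	 */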
888 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
889 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
890
891 if (enh & L2CAP_CTRL_FRAME_TYPE) {
892 /* S-Frame */
893 control->sframe = 1;
894 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
895 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
896
897 control->sar = 0;
898 control->txseq = 0;
899 } else {
900 /* I-Frame */
901 control->sframe = 0;
902 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
903 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
904
905 control->poll = 0;
906 control->super = 0;
907 }
908}
909
910static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
911{
912 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
913 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
914
915 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
916 /* S-Frame */
917 control->sframe = 1;
918 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
919 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
920
921 control->sar = 0;
922 control->txseq = 0;
923 } else {
924 /* I-Frame */
925 control->sframe = 0;
926 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
927 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
928
929 control->poll = 0;
930 control->super = 0;
931 }
932}
933
934static inline void __unpack_control(struct l2cap_chan *chan,
935 struct sk_buff *skb)
936{
937 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
938 __unpack_extended_control(get_unaligned_le32(skb->data),
939 &bt_cb(skb)->control);
Mat Martineaucec8ab6e2012-05-17 20:53:36 -0700940 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700941 } else {
942 __unpack_enhanced_control(get_unaligned_le16(skb->data),
943 &bt_cb(skb)->control);
Mat Martineaucec8ab6e2012-05-17 20:53:36 -0700944 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700945 }
946}
947
948static u32 __pack_extended_control(struct l2cap_ctrl *control)
949{
950 u32 packed;
951
952 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
953 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
954
955 if (control->sframe) {
956 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
957 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
958 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
959 } else {
960 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
961 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
962 }
963
964 return packed;
965}
966
967static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
968{
969 u16 packed;
970
971 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
972 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
973
974 if (control->sframe) {
975 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
976 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
977 packed |= L2CAP_CTRL_FRAME_TYPE;
978 } else {
979 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
980 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
981 }
982
983 return packed;
984}
985
986static inline void __pack_control(struct l2cap_chan *chan,
987 struct l2cap_ctrl *control,
988 struct sk_buff *skb)
989{
990 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
991 put_unaligned_le32(__pack_extended_control(control),
992 skb->data + L2CAP_HDR_SIZE);
993 } else {
994 put_unaligned_le16(__pack_enhanced_control(control),
995 skb->data + L2CAP_HDR_SIZE);
996 }
997}
998
Gustavo Padovanba7aa642012-05-29 13:29:16 -0300999static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1000{
1001 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1002 return L2CAP_EXT_HDR_SIZE;
1003 else
1004 return L2CAP_ENH_HDR_SIZE;
1005}
1006
Mat Martineaua67d7f62012-05-17 20:53:35 -07001007static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1008 u32 control)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001009{
1010 struct sk_buff *skb;
1011 struct l2cap_hdr *lh;
Gustavo Padovanba7aa642012-05-29 13:29:16 -03001012 int hlen = __ertm_hdr_size(chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001013
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001014 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03001015 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001016
Mat Martineaua67d7f62012-05-17 20:53:35 -07001017 skb = bt_skb_alloc(hlen, GFP_KERNEL);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001018
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001019 if (!skb)
Mat Martineaua67d7f62012-05-17 20:53:35 -07001020 return ERR_PTR(-ENOMEM);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001021
1022 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001023 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001024 lh->cid = cpu_to_le16(chan->dcid);
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03001025
Mat Martineaua67d7f62012-05-17 20:53:35 -07001026 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1027 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1028 else
1029 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001030
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001031 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineaua67d7f62012-05-17 20:53:35 -07001032 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03001033 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001034 }
1035
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001036 skb->priority = HCI_PRIO_MAX;
Mat Martineaua67d7f62012-05-17 20:53:35 -07001037 return skb;
1038}
1039
1040static void l2cap_send_sframe(struct l2cap_chan *chan,
1041 struct l2cap_ctrl *control)
1042{
1043 struct sk_buff *skb;
1044 u32 control_field;
1045
1046 BT_DBG("chan %p, control %p", chan, control);
1047
1048 if (!control->sframe)
1049 return;
1050
Mat Martineaub99e13a2012-10-23 15:24:19 -07001051 if (__chan_is_moving(chan))
1052 return;
1053
Mat Martineaua67d7f62012-05-17 20:53:35 -07001054 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1055 !control->poll)
1056 control->final = 1;
1057
1058 if (control->super == L2CAP_SUPER_RR)
1059 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1060 else if (control->super == L2CAP_SUPER_RNR)
1061 set_bit(CONN_RNR_SENT, &chan->conn_state);
1062
1063 if (control->super != L2CAP_SUPER_SREJ) {
1064 chan->last_acked_seq = control->reqseq;
1065 __clear_ack_timer(chan);
1066 }
1067
1068 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1069 control->final, control->poll, control->super);
1070
1071 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1072 control_field = __pack_extended_control(control);
1073 else
1074 control_field = __pack_enhanced_control(control);
1075
1076 skb = l2cap_create_sframe_pdu(chan, control_field);
1077 if (!IS_ERR(skb))
1078 l2cap_do_send(chan, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001079}
1080
Mat Martineauc9e3d5e2012-05-17 20:53:48 -07001081static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
Gustavo F. Padovan7e743092009-08-26 04:04:03 -03001082{
Mat Martineauc9e3d5e2012-05-17 20:53:48 -07001083 struct l2cap_ctrl control;
Gustavo F. Padovan7e743092009-08-26 04:04:03 -03001084
Mat Martineauc9e3d5e2012-05-17 20:53:48 -07001085 BT_DBG("chan %p, poll %d", chan, poll);
1086
1087 memset(&control, 0, sizeof(control));
1088 control.sframe = 1;
1089 control.poll = poll;
1090
1091 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1092 control.super = L2CAP_SUPER_RNR;
1093 else
1094 control.super = L2CAP_SUPER_RR;
1095
1096 control.reqseq = chan->buffer_seq;
1097 l2cap_send_sframe(chan, &control);
Gustavo F. Padovan7e743092009-08-26 04:04:03 -03001098}
1099
Gustavo F. Padovanb4450032011-04-12 18:15:09 -03001100static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +03001101{
Johan Hedberg5ff6f342014-08-07 22:56:43 +03001102 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1103 return true;
1104
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001105 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +03001106}
1107
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001108static bool __amp_capable(struct l2cap_chan *chan)
1109{
1110 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann1df7b172013-10-05 11:47:49 -07001111 struct hci_dev *hdev;
1112 bool amp_available = false;
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001113
Marcel Holtmann1df7b172013-10-05 11:47:49 -07001114 if (!conn->hs_enabled)
1115 return false;
1116
1117 if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
1118 return false;
1119
1120 read_lock(&hci_dev_list_lock);
1121 list_for_each_entry(hdev, &hci_dev_list, list) {
1122 if (hdev->amp_type != AMP_TYPE_BREDR &&
1123 test_bit(HCI_UP, &hdev->flags)) {
1124 amp_available = true;
1125 break;
1126 }
1127 }
1128 read_unlock(&hci_dev_list_lock);
1129
1130 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1131 return amp_available;
Marcel Holtmann848566b2013-10-01 22:59:22 -07001132
1133 return false;
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001134}
1135
Andrei Emeltchenko5ce66b52012-10-31 15:46:30 +02001136static bool l2cap_check_efs(struct l2cap_chan *chan)
1137{
1138 /* Check EFS parameters */
1139 return true;
1140}
1141
Andrei Emeltchenko2766be42012-09-27 17:26:21 +03001142void l2cap_send_conn_req(struct l2cap_chan *chan)
Andrei Emeltchenko9b27f352012-02-24 16:00:00 +02001143{
1144 struct l2cap_conn *conn = chan->conn;
1145 struct l2cap_conn_req req;
1146
1147 req.scid = cpu_to_le16(chan->scid);
1148 req.psm = chan->psm;
1149
1150 chan->ident = l2cap_get_ident(conn);
1151
1152 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1153
1154 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1155}
1156
Mat Martineau8eb200b2012-10-23 15:24:17 -07001157static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1158{
1159 struct l2cap_create_chan_req req;
1160 req.scid = cpu_to_le16(chan->scid);
1161 req.psm = chan->psm;
1162 req.amp_id = amp_id;
1163
1164 chan->ident = l2cap_get_ident(chan->conn);
1165
1166 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1167 sizeof(req), &req);
1168}
1169
Mat Martineau02b0fbb2012-10-23 15:24:10 -07001170static void l2cap_move_setup(struct l2cap_chan *chan)
1171{
1172 struct sk_buff *skb;
1173
1174 BT_DBG("chan %p", chan);
1175
1176 if (chan->mode != L2CAP_MODE_ERTM)
1177 return;
1178
1179 __clear_retrans_timer(chan);
1180 __clear_monitor_timer(chan);
1181 __clear_ack_timer(chan);
1182
1183 chan->retry_count = 0;
1184 skb_queue_walk(&chan->tx_q, skb) {
1185 if (bt_cb(skb)->control.retries)
1186 bt_cb(skb)->control.retries = 1;
1187 else
1188 break;
1189 }
1190
1191 chan->expected_tx_seq = chan->buffer_seq;
1192
1193 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1194 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1195 l2cap_seq_list_clear(&chan->retrans_list);
1196 l2cap_seq_list_clear(&chan->srej_list);
1197 skb_queue_purge(&chan->srej_q);
1198
1199 chan->tx_state = L2CAP_TX_STATE_XMIT;
1200 chan->rx_state = L2CAP_RX_STATE_MOVE;
1201
1202 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1203}
1204
Mat Martineau5f3847a2012-10-23 15:24:12 -07001205static void l2cap_move_done(struct l2cap_chan *chan)
1206{
1207 u8 move_role = chan->move_role;
1208 BT_DBG("chan %p", chan);
1209
1210 chan->move_state = L2CAP_MOVE_STABLE;
1211 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1212
1213 if (chan->mode != L2CAP_MODE_ERTM)
1214 return;
1215
1216 switch (move_role) {
1217 case L2CAP_MOVE_ROLE_INITIATOR:
1218 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1219 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1220 break;
1221 case L2CAP_MOVE_ROLE_RESPONDER:
1222 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1223 break;
1224 }
1225}
1226
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001227static void l2cap_chan_ready(struct l2cap_chan *chan)
1228{
Mat Martineau28270112012-05-17 21:14:09 -07001229 /* This clears all conf flags, including CONF_NOT_COMPLETE */
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001230 chan->conf_state = 0;
1231 __clear_chan_timer(chan);
1232
Johan Hedberg0ce43ce2013-12-05 14:55:33 +02001233 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1234 chan->ops->suspend(chan);
Johan Hedberg177f8f22013-05-31 17:54:51 +03001235
Andrei Emeltchenko54a59aa2012-05-27 22:27:53 -03001236 chan->state = BT_CONNECTED;
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001237
Andrei Emeltchenkofd83e2c2012-05-30 09:55:32 +03001238 chan->ops->ready(chan);
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001239}
1240
Johan Hedbergf1496de2013-05-13 14:15:56 +03001241static void l2cap_le_connect(struct l2cap_chan *chan)
1242{
1243 struct l2cap_conn *conn = chan->conn;
1244 struct l2cap_le_conn_req req;
1245
Johan Hedberg595177f2013-12-02 22:12:22 +02001246 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1247 return;
1248
Johan Hedbergf1496de2013-05-13 14:15:56 +03001249 req.psm = chan->psm;
1250 req.scid = cpu_to_le16(chan->scid);
1251 req.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02001252 req.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03001253 req.credits = cpu_to_le16(chan->rx_credits);
Johan Hedbergf1496de2013-05-13 14:15:56 +03001254
1255 chan->ident = l2cap_get_ident(conn);
1256
1257 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1258 sizeof(req), &req);
1259}
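/* The LE Credit Based Connection Request built above carries five
 * little-endian 16-bit fields: PSM, source CID, MTU, MPS and the
 * initial credit count granted to the peer. As an illustration only
 * (these values are not defaults used by this file):
 *
 *   psm 0x0080, scid 0x0040, mtu 512, mps 23, credits 4
 *
 * would let the remote side send four MPS-sized PDUs before it has to
 * wait for additional credits.
 */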
1260
1261static void l2cap_le_start(struct l2cap_chan *chan)
1262{
1263 struct l2cap_conn *conn = chan->conn;
1264
1265 if (!smp_conn_security(conn->hcon, chan->sec_level))
1266 return;
1267
1268 if (!chan->psm) {
1269 l2cap_chan_ready(chan);
1270 return;
1271 }
1272
1273 if (chan->state == BT_CONNECT)
1274 l2cap_le_connect(chan);
1275}
1276
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001277static void l2cap_start_connection(struct l2cap_chan *chan)
1278{
1279 if (__amp_capable(chan)) {
1280 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1281 a2mp_discover_amp(chan);
Johan Hedbergf1496de2013-05-13 14:15:56 +03001282 } else if (chan->conn->hcon->type == LE_LINK) {
1283 l2cap_le_start(chan);
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001284 } else {
1285 l2cap_send_conn_req(chan);
1286 }
1287}
1288
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001289static void l2cap_request_info(struct l2cap_conn *conn)
1290{
1291 struct l2cap_info_req req;
1292
1293 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1294 return;
1295
1296 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1297
1298 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1299 conn->info_ident = l2cap_get_ident(conn);
1300
1301 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1302
1303 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1304 sizeof(req), &req);
1305}
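/* The information request sent above asks the peer for its extended
 * feature mask and starts the info timer. Until the response (or the
 * timeout handler) sets L2CAP_INFO_FEAT_MASK_REQ_DONE, connection
 * establishment in l2cap_do_start() below stays deferred, because
 * mode selection depends on the remote feature bits.
 */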
1306
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001307static void l2cap_do_start(struct l2cap_chan *chan)
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001308{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03001309 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001310
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001311 if (conn->hcon->type == LE_LINK) {
Johan Hedberg96ac34f2013-05-13 11:15:07 +03001312 l2cap_le_start(chan);
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001313 return;
1314 }
1315
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001316 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1317 l2cap_request_info(conn);
1318 return;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001319 }
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001320
1321 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1322 return;
1323
1324 if (l2cap_chan_check_security(chan, true) &&
1325 __l2cap_no_conn_pending(chan))
1326 l2cap_start_connection(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001327}
1328
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001329static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1330{
1331 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03001332 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001333 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1334
1335 switch (mode) {
1336 case L2CAP_MODE_ERTM:
1337 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1338 case L2CAP_MODE_STREAMING:
1339 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1340 default:
1341 return 0x00;
1342 }
1343}
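/* l2cap_mode_supported() reports ERTM or streaming mode as usable
 * only when both the local feature mask (l2cap_feat_mask, extended
 * with ERTM/streaming unless disable_ertm is set) and the remote
 * feature mask advertise it. Any other mode, including basic mode,
 * yields 0, which is why l2cap_conn_start() below combines this check
 * with the CONF_STATE2_DEVICE flag before closing a channel.
 */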
1344
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001345static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001346{
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001347 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001348 struct l2cap_disconn_req req;
1349
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001350 if (!conn)
1351 return;
1352
Andrei Emeltchenkoaad3d0e2012-09-06 15:05:42 +03001353 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001354 __clear_retrans_timer(chan);
1355 __clear_monitor_timer(chan);
1356 __clear_ack_timer(chan);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001357 }
1358
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001359 if (chan->scid == L2CAP_CID_A2MP) {
Gustavo Padovand1177732012-10-06 11:47:38 +01001360 l2cap_state_change(chan, BT_DISCONN);
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001361 return;
1362 }
1363
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001364 req.dcid = cpu_to_le16(chan->dcid);
1365 req.scid = cpu_to_le16(chan->scid);
Gustavo Padovan2d792812012-10-06 10:07:01 +01001366 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1367 sizeof(req), &req);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001368
Gustavo Padovanf8e73012013-10-15 19:24:46 -03001369 l2cap_state_change_and_error(chan, BT_DISCONN, err);
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001370}
1371
Linus Torvalds1da177e2005-04-16 15:20:36 -07001372/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001373static void l2cap_conn_start(struct l2cap_conn *conn)
1374{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001375 struct l2cap_chan *chan, *tmp;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001376
1377 BT_DBG("conn %p", conn);
1378
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001379 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001380
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001381 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001382 l2cap_chan_lock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001383
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03001384 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001385 l2cap_chan_ready(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001386 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001387 continue;
1388 }
1389
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001390 if (chan->state == BT_CONNECT) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03001391 if (!l2cap_chan_check_security(chan, true) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001392 !__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001393 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001394 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02001395 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001396
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001397 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
Gustavo Padovan2d792812012-10-06 10:07:01 +01001398 && test_bit(CONF_STATE2_DEVICE,
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001399 &chan->conf_state)) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001400 l2cap_chan_close(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001401 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001402 continue;
1403 }
1404
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001405 l2cap_start_connection(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001406
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001407 } else if (chan->state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001408 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001409 char buf[128];
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001410 rsp.scid = cpu_to_le16(chan->dcid);
1411 rsp.dcid = cpu_to_le16(chan->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001412
Johan Hedberge7cafc42014-07-17 15:35:38 +03001413 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07001414 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001415 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1416 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08001417 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001418
1419 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02001420 l2cap_state_change(chan, BT_CONFIG);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001421 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1422 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001423 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001424 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001425 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1426 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001427 }
1428
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001429 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001430 sizeof(rsp), &rsp);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001431
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001432 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001433 rsp.result != L2CAP_CR_SUCCESS) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001434 l2cap_chan_unlock(chan);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001435 continue;
1436 }
1437
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001438 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001439 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001440 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03001441 chan->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001442 }
1443
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001444 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001445 }
1446
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001447 mutex_unlock(&conn->chan_lock);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001448}
1449
Ville Tervob62f3282011-02-10 22:38:50 -03001450static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1451{
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001452 struct hci_conn *hcon = conn->hcon;
Johan Hedbergdcc36c12014-07-09 12:59:13 +03001453 struct hci_dev *hdev = hcon->hdev;
Ville Tervob62f3282011-02-10 22:38:50 -03001454
Johan Hedberge760ec12014-08-07 22:56:47 +03001455 BT_DBG("%s conn %p", hdev->name, conn);
Ville Tervob62f3282011-02-10 22:38:50 -03001456
Johan Hedberge760ec12014-08-07 22:56:47 +03001457 /* For outgoing pairing which doesn't necessarily have an
1458 * associated socket (e.g. mgmt_pair_device).
1459 */
1460 if (hcon->out)
1461 smp_conn_security(hcon, hcon->pending_sec_level);
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001462
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001463 /* For LE slave connections, make sure the connection interval
1464 * is in the range of the minimum and maximum intervals that have
1465 * been configured for this connection. If not, then trigger
1466 * the connection update procedure.
1467 */
Johan Hedberg40bef302014-07-16 11:42:27 +03001468 if (hcon->role == HCI_ROLE_SLAVE &&
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001469 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1470 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1471 struct l2cap_conn_param_update_req req;
1472
1473 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1474 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1475 req.latency = cpu_to_le16(hcon->le_conn_latency);
1476 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1477
1478 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1479 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1480 }
Ville Tervob62f3282011-02-10 22:38:50 -03001481}
1482
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001483static void l2cap_conn_ready(struct l2cap_conn *conn)
1484{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001485 struct l2cap_chan *chan;
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001486 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001487
1488 BT_DBG("conn %p", conn);
1489
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001490 if (hcon->type == ACL_LINK)
1491 l2cap_request_info(conn);
1492
Johan Hedberge760ec12014-08-07 22:56:47 +03001493 mutex_lock(&conn->chan_lock);
1494
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001495 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03001496
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001497 l2cap_chan_lock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001498
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001499 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001500 l2cap_chan_unlock(chan);
1501 continue;
1502 }
1503
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001504 if (hcon->type == LE_LINK) {
Johan Hedbergf1496de2013-05-13 14:15:56 +03001505 l2cap_le_start(chan);
Vinicius Costa Gomes63128452011-06-17 22:46:26 -03001506 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001507 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1508 l2cap_chan_ready(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001509 } else if (chan->state == BT_CONNECT) {
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001510 l2cap_do_start(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001511 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001512
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001513 l2cap_chan_unlock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001514 }
1515
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001516 mutex_unlock(&conn->chan_lock);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001517
Johan Hedberg79a05722014-08-08 09:28:04 +03001518 if (hcon->type == LE_LINK)
1519 l2cap_le_conn_ready(conn);
1520
Johan Hedberg61a939c2014-01-17 20:45:11 +02001521 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001522}
1523
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001524/* Notify sockets that we cannot guarantee reliability anymore */
1525static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1526{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001527 struct l2cap_chan *chan;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001528
1529 BT_DBG("conn %p", conn);
1530
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001531 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001532
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001533 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenkoecf61bd2011-10-11 14:04:32 +03001534 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
Gustavo Padovan1d8b1fd2012-10-06 11:34:52 +01001535 l2cap_chan_set_err(chan, err);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001536 }
1537
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001538 mutex_unlock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001539}
1540
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001541static void l2cap_info_timeout(struct work_struct *work)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001542{
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001543 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001544 info_timer.work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001545
Marcel Holtmann984947d2009-02-06 23:35:19 +01001546 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001547 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001548
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001549 l2cap_conn_start(conn);
1550}
1551
David Herrmann2c8e1412013-04-06 20:28:45 +02001552/*
1553 * l2cap_user
1554 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1555 * callback is called during registration. The ->remove callback is called
1556 * during unregistration.
1557 * An l2cap_user object is unregistered either explicitly or implicitly when
1558 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1559 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1560 * External modules must own a reference to the l2cap_conn object if they intend
1561 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1562 * any time if they don't.
1563 */
1564
1565int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1566{
1567 struct hci_dev *hdev = conn->hcon->hdev;
1568 int ret;
1569
1570 /* We need to check whether l2cap_conn is registered. If it is not, we
1571 * must not register the l2cap_user. l2cap_conn_del() unregisters
1572 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1573 * relies on the parent hci_conn object to be locked. This itself relies
1574 * on the hci_dev object to be locked. So we must lock the hci device
1575 * here, too. */
1576
1577 hci_dev_lock(hdev);
1578
1579 if (user->list.next || user->list.prev) {
1580 ret = -EINVAL;
1581 goto out_unlock;
1582 }
1583
1584 /* conn->hchan is NULL after l2cap_conn_del() was called */
1585 if (!conn->hchan) {
1586 ret = -ENODEV;
1587 goto out_unlock;
1588 }
1589
1590 ret = user->probe(conn, user);
1591 if (ret)
1592 goto out_unlock;
1593
1594 list_add(&user->list, &conn->users);
1595 ret = 0;
1596
1597out_unlock:
1598 hci_dev_unlock(hdev);
1599 return ret;
1600}
1601EXPORT_SYMBOL(l2cap_register_user);
1602
1603void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1604{
1605 struct hci_dev *hdev = conn->hcon->hdev;
1606
1607 hci_dev_lock(hdev);
1608
1609 if (!user->list.next || !user->list.prev)
1610 goto out_unlock;
1611
1612 list_del(&user->list);
1613 user->list.next = NULL;
1614 user->list.prev = NULL;
1615 user->remove(conn, user);
1616
1617out_unlock:
1618 hci_dev_unlock(hdev);
1619}
1620EXPORT_SYMBOL(l2cap_unregister_user);
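/* Sketch of how an external module might use the l2cap_user interface
 * documented above. The my_probe/my_remove/my_user names and callback
 * bodies are hypothetical; only the register/unregister calls and the
 * probe/remove signatures come from this file:
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// set up per-connection state; a non-zero return
 *		// aborts the registration
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// tear down state; conn->hcon and conn->hchan are
 *		// still valid at this point
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 */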
1621
1622static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1623{
1624 struct l2cap_user *user;
1625
1626 while (!list_empty(&conn->users)) {
1627 user = list_first_entry(&conn->users, struct l2cap_user, list);
1628 list_del(&user->list);
1629 user->list.next = NULL;
1630 user->list.prev = NULL;
1631 user->remove(conn, user);
1632 }
1633}
1634
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001635static void l2cap_conn_del(struct hci_conn *hcon, int err)
1636{
1637 struct l2cap_conn *conn = hcon->l2cap_data;
1638 struct l2cap_chan *chan, *l;
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001639
1640 if (!conn)
1641 return;
1642
1643 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1644
1645 kfree_skb(conn->rx_skb);
1646
Johan Hedberg61a939c2014-01-17 20:45:11 +02001647 skb_queue_purge(&conn->pending_rx);
Jukka Taimisto7ab56c32014-06-12 10:15:13 +00001648
1649 /* We cannot call flush_work(&conn->pending_rx_work) here since we
1650 * might block if we are running on a worker from the same workqueue
1651 * pending_rx_work is waiting on.
1652 */
1653 if (work_pending(&conn->pending_rx_work))
1654 cancel_work_sync(&conn->pending_rx_work);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001655
Johan Hedbergf3d82d02014-09-05 22:19:50 +03001656 if (work_pending(&conn->id_addr_update_work))
1657 cancel_work_sync(&conn->id_addr_update_work);
1658
David Herrmann2c8e1412013-04-06 20:28:45 +02001659 l2cap_unregister_all_users(conn);
1660
Johan Hedberge31fb862014-08-18 20:33:28 +03001661 /* Force the connection to be immediately dropped */
1662 hcon->disc_timeout = 0;
1663
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001664 mutex_lock(&conn->chan_lock);
1665
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001666 /* Kill channels */
1667 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
Mat Martineau61d6ef32012-04-27 16:50:50 -07001668 l2cap_chan_hold(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001669 l2cap_chan_lock(chan);
1670
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001671 l2cap_chan_del(chan, err);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001672
1673 l2cap_chan_unlock(chan);
1674
Gustavo Padovan80b98022012-05-27 22:27:51 -03001675 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07001676 l2cap_chan_put(chan);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001677 }
1678
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001679 mutex_unlock(&conn->chan_lock);
1680
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001681 hci_chan_del(conn->hchan);
1682
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001683 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
Ulisses Furquim127074b2012-01-30 18:26:29 -02001684 cancel_delayed_work_sync(&conn->info_timer);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001685
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001686 hcon->l2cap_data = NULL;
David Herrmann9c903e32013-04-06 20:28:44 +02001687 conn->hchan = NULL;
1688 l2cap_conn_put(conn);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001689}
1690
David Herrmann9c903e32013-04-06 20:28:44 +02001691static void l2cap_conn_free(struct kref *ref)
1692{
1693 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1694
1695 hci_conn_put(conn->hcon);
1696 kfree(conn);
1697}
1698
Johan Hedberg51bb84572014-08-15 21:06:57 +03001699struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
David Herrmann9c903e32013-04-06 20:28:44 +02001700{
1701 kref_get(&conn->ref);
Johan Hedberg51bb84572014-08-15 21:06:57 +03001702 return conn;
David Herrmann9c903e32013-04-06 20:28:44 +02001703}
1704EXPORT_SYMBOL(l2cap_conn_get);
1705
1706void l2cap_conn_put(struct l2cap_conn *conn)
1707{
1708 kref_put(&conn->ref, l2cap_conn_free);
1709}
1710EXPORT_SYMBOL(l2cap_conn_put);
1711
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713
Ido Yarivc2287682012-04-20 15:46:07 -03001714/* Find channel with psm and source / destination bdaddr.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 * Returns closest match.
1716 */
Ido Yarivc2287682012-04-20 15:46:07 -03001717static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1718 bdaddr_t *src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001719 bdaddr_t *dst,
1720 u8 link_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001722 struct l2cap_chan *c, *c1 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001724 read_lock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001725
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001726 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001727 if (state && c->state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001728 continue;
1729
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001730 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1731 continue;
1732
1733 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1734 continue;
1735
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001736 if (c->psm == psm) {
Ido Yarivc2287682012-04-20 15:46:07 -03001737 int src_match, dst_match;
1738 int src_any, dst_any;
1739
Linus Torvalds1da177e2005-04-16 15:20:36 -07001740 /* Exact match. */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001741 src_match = !bacmp(&c->src, src);
1742 dst_match = !bacmp(&c->dst, dst);
Ido Yarivc2287682012-04-20 15:46:07 -03001743 if (src_match && dst_match) {
Johan Hedberga24cce12014-08-07 22:56:42 +03001744 l2cap_chan_hold(c);
Johannes Berga7567b22011-06-01 08:29:54 +02001745 read_unlock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001746 return c;
1747 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001748
1749 /* Closest match */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001750 src_any = !bacmp(&c->src, BDADDR_ANY);
1751 dst_any = !bacmp(&c->dst, BDADDR_ANY);
Ido Yarivc2287682012-04-20 15:46:07 -03001752 if ((src_match && dst_any) || (src_any && dst_match) ||
1753 (src_any && dst_any))
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001754 c1 = c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755 }
1756 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001757
Johan Hedberga24cce12014-08-07 22:56:42 +03001758 if (c1)
1759 l2cap_chan_hold(c1);
1760
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001761 read_unlock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001762
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001763 return c1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764}
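/* Match priority in the lookup above: an exact source and destination
 * address match returns immediately; otherwise the last channel whose
 * non-wildcard side matches (BDADDR_ANY counts as a match on that
 * side) is remembered and returned as the closest match. Whatever is
 * returned has been taken with l2cap_chan_hold(), so the caller owns
 * the corresponding l2cap_chan_put().
 */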
1765
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001766static void l2cap_monitor_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001767{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001768 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001769 monitor_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001770
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03001771 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001772
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001773 l2cap_chan_lock(chan);
1774
Mat Martineau80909e02012-05-17 20:53:50 -07001775 if (!chan->conn) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001776 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001777 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001778 return;
1779 }
1780
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001781 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001782
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001783 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001784 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001785}
1786
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001787static void l2cap_retrans_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001788{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001789 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001790 retrans_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001791
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03001792 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001793
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001794 l2cap_chan_lock(chan);
1795
Mat Martineau80909e02012-05-17 20:53:50 -07001796 if (!chan->conn) {
1797 l2cap_chan_unlock(chan);
1798 l2cap_chan_put(chan);
1799 return;
1800 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001801
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001802 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001803 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001804 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001805}
1806
Gustavo Padovand6603662012-05-21 13:58:22 -03001807static void l2cap_streaming_send(struct l2cap_chan *chan,
1808 struct sk_buff_head *skbs)
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001809{
Gustavo F. Padovanccbb84a2010-08-30 18:44:44 -03001810 struct sk_buff *skb;
Mat Martineau37339372012-05-17 20:53:33 -07001811 struct l2cap_ctrl *control;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001812
Mat Martineau37339372012-05-17 20:53:33 -07001813 BT_DBG("chan %p, skbs %p", chan, skbs);
1814
Mat Martineaub99e13a2012-10-23 15:24:19 -07001815 if (__chan_is_moving(chan))
1816 return;
1817
Mat Martineau37339372012-05-17 20:53:33 -07001818 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1819
1820 while (!skb_queue_empty(&chan->tx_q)) {
1821
1822 skb = skb_dequeue(&chan->tx_q);
1823
1824 bt_cb(skb)->control.retries = 1;
1825 control = &bt_cb(skb)->control;
1826
1827 control->reqseq = 0;
1828 control->txseq = chan->next_tx_seq;
1829
1830 __pack_control(chan, control, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001831
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001832 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau37339372012-05-17 20:53:33 -07001833 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1834 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001835 }
1836
Gustavo F. Padovan43434782011-04-12 18:31:57 -03001837 l2cap_do_send(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001838
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001839 BT_DBG("Sent txseq %u", control->txseq);
Mat Martineau37339372012-05-17 20:53:33 -07001840
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001841 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau37339372012-05-17 20:53:33 -07001842 chan->frames_sent++;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001843 }
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001844}
1845
Szymon Janc67c9e842011-07-28 16:24:33 +02001846static int l2cap_ertm_send(struct l2cap_chan *chan)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001847{
1848 struct sk_buff *skb, *tx_skb;
Mat Martineau18a48e72012-05-17 20:53:34 -07001849 struct l2cap_ctrl *control;
1850 int sent = 0;
1851
1852 BT_DBG("chan %p", chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001853
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001854 if (chan->state != BT_CONNECTED)
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001855 return -ENOTCONN;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001856
Mat Martineau94122bb2012-05-02 09:42:02 -07001857 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1858 return 0;
1859
Mat Martineaub99e13a2012-10-23 15:24:19 -07001860 if (__chan_is_moving(chan))
1861 return 0;
1862
Mat Martineau18a48e72012-05-17 20:53:34 -07001863 while (chan->tx_send_head &&
1864 chan->unacked_frames < chan->remote_tx_win &&
1865 chan->tx_state == L2CAP_TX_STATE_XMIT) {
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001866
Mat Martineau18a48e72012-05-17 20:53:34 -07001867 skb = chan->tx_send_head;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001868
Mat Martineau18a48e72012-05-17 20:53:34 -07001869 bt_cb(skb)->control.retries = 1;
1870 control = &bt_cb(skb)->control;
Gustavo F. Padovan95ffa972010-06-18 20:37:33 -03001871
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001872 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
Mat Martineau18a48e72012-05-17 20:53:34 -07001873 control->final = 1;
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001874
Mat Martineau18a48e72012-05-17 20:53:34 -07001875 control->reqseq = chan->buffer_seq;
1876 chan->last_acked_seq = chan->buffer_seq;
1877 control->txseq = chan->next_tx_seq;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001878
Mat Martineau18a48e72012-05-17 20:53:34 -07001879 __pack_control(chan, control, skb);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001880
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001881 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau18a48e72012-05-17 20:53:34 -07001882 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1883 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001884 }
1885
Mat Martineau18a48e72012-05-17 20:53:34 -07001886 /* Clone after data has been modified. Data is assumed to be
1887 * read-only (for locking purposes) on cloned sk_buffs.
1888 */
1889 tx_skb = skb_clone(skb, GFP_KERNEL);
1890
1891 if (!tx_skb)
1892 break;
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -03001893
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001894 __set_retrans_timer(chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001895
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001896 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau18a48e72012-05-17 20:53:34 -07001897 chan->unacked_frames++;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03001898 chan->frames_sent++;
Mat Martineau18a48e72012-05-17 20:53:34 -07001899 sent++;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001900
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001901 if (skb_queue_is_last(&chan->tx_q, skb))
1902 chan->tx_send_head = NULL;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001903 else
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001904 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
Mat Martineau18a48e72012-05-17 20:53:34 -07001905
1906 l2cap_do_send(chan, tx_skb);
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001907 BT_DBG("Sent txseq %u", control->txseq);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001908 }
1909
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001910 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1911 chan->unacked_frames, skb_queue_len(&chan->tx_q));
Mat Martineau18a48e72012-05-17 20:53:34 -07001912
1913 return sent;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001914}
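/* Notes on the ERTM transmit path above: I-frames are sent only while
 * unacked_frames stays below the remote transmit window, and each skb
 * is cloned before transmission so that the original remains on tx_q
 * for possible retransmission. With, say, remote_tx_win of 8 and
 * eight frames already unacked, l2cap_ertm_send() sends nothing and
 * returns 0; transmission resumes once an acknowledgement frees up
 * window space.
 */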
1915
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001916static void l2cap_ertm_resend(struct l2cap_chan *chan)
1917{
1918 struct l2cap_ctrl control;
1919 struct sk_buff *skb;
1920 struct sk_buff *tx_skb;
1921 u16 seq;
1922
1923 BT_DBG("chan %p", chan);
1924
1925 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1926 return;
1927
Mat Martineaub99e13a2012-10-23 15:24:19 -07001928 if (__chan_is_moving(chan))
1929 return;
1930
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001931 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1932 seq = l2cap_seq_list_pop(&chan->retrans_list);
1933
1934 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1935 if (!skb) {
1936 BT_DBG("Error: Can't retransmit seq %d, frame missing",
Gustavo Padovan2d792812012-10-06 10:07:01 +01001937 seq);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001938 continue;
1939 }
1940
1941 bt_cb(skb)->control.retries++;
1942 control = bt_cb(skb)->control;
1943
1944 if (chan->max_tx != 0 &&
1945 bt_cb(skb)->control.retries > chan->max_tx) {
1946 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001947 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001948 l2cap_seq_list_clear(&chan->retrans_list);
1949 break;
1950 }
1951
1952 control.reqseq = chan->buffer_seq;
1953 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1954 control.final = 1;
1955 else
1956 control.final = 0;
1957
1958 if (skb_cloned(skb)) {
1959 /* Cloned sk_buffs are read-only, so we need a
1960 * writeable copy
1961 */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03001962 tx_skb = skb_copy(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001963 } else {
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03001964 tx_skb = skb_clone(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001965 }
1966
1967 if (!tx_skb) {
1968 l2cap_seq_list_clear(&chan->retrans_list);
1969 break;
1970 }
1971
1972 /* Update skb contents */
1973 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1974 put_unaligned_le32(__pack_extended_control(&control),
1975 tx_skb->data + L2CAP_HDR_SIZE);
1976 } else {
1977 put_unaligned_le16(__pack_enhanced_control(&control),
1978 tx_skb->data + L2CAP_HDR_SIZE);
1979 }
1980
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02001981 /* Update FCS */
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001982 if (chan->fcs == L2CAP_FCS_CRC16) {
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02001983 u16 fcs = crc16(0, (u8 *) tx_skb->data,
1984 tx_skb->len - L2CAP_FCS_SIZE);
1985 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
1986 L2CAP_FCS_SIZE);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001987 }
1988
1989 l2cap_do_send(chan, tx_skb);
1990
1991 BT_DBG("Resent txseq %d", control.txseq);
1992
1993 chan->last_acked_seq = chan->buffer_seq;
1994 }
1995}
1996
Mat Martineauf80842a2012-05-17 20:53:46 -07001997static void l2cap_retransmit(struct l2cap_chan *chan,
1998 struct l2cap_ctrl *control)
1999{
2000 BT_DBG("chan %p, control %p", chan, control);
2001
2002 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2003 l2cap_ertm_resend(chan);
2004}
2005
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002006static void l2cap_retransmit_all(struct l2cap_chan *chan,
2007 struct l2cap_ctrl *control)
2008{
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002009 struct sk_buff *skb;
2010
2011 BT_DBG("chan %p, control %p", chan, control);
2012
2013 if (control->poll)
2014 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2015
2016 l2cap_seq_list_clear(&chan->retrans_list);
2017
2018 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2019 return;
2020
2021 if (chan->unacked_frames) {
2022 skb_queue_walk(&chan->tx_q, skb) {
2023 if (bt_cb(skb)->control.txseq == control->reqseq ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01002024 skb == chan->tx_send_head)
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002025 break;
2026 }
2027
2028 skb_queue_walk_from(&chan->tx_q, skb) {
2029 if (skb == chan->tx_send_head)
2030 break;
2031
2032 l2cap_seq_list_append(&chan->retrans_list,
2033 bt_cb(skb)->control.txseq);
2034 }
2035
2036 l2cap_ertm_resend(chan);
2037 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002038}
2039
Szymon Jancb17e73b2012-01-11 10:59:47 +01002040static void l2cap_send_ack(struct l2cap_chan *chan)
2041{
Mat Martineau0a0aba42012-05-17 20:53:39 -07002042 struct l2cap_ctrl control;
2043 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2044 chan->last_acked_seq);
2045 int threshold;
2046
2047 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2048 chan, chan->last_acked_seq, chan->buffer_seq);
2049
2050 memset(&control, 0, sizeof(control));
2051 control.sframe = 1;
2052
2053 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2054 chan->rx_state == L2CAP_RX_STATE_RECV) {
2055 __clear_ack_timer(chan);
2056 control.super = L2CAP_SUPER_RNR;
2057 control.reqseq = chan->buffer_seq;
2058 l2cap_send_sframe(chan, &control);
2059 } else {
2060 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2061 l2cap_ertm_send(chan);
2062 /* If any i-frames were sent, they included an ack */
2063 if (chan->buffer_seq == chan->last_acked_seq)
2064 frames_to_ack = 0;
2065 }
2066
Mat Martineauc20f8e32012-07-10 05:47:07 -07002067 /* Ack now if the window is 3/4ths full.
Mat Martineau0a0aba42012-05-17 20:53:39 -07002068 * Calculate without mul or div
2069 */
Mat Martineauc20f8e32012-07-10 05:47:07 -07002070 threshold = chan->ack_win;
Mat Martineau0a0aba42012-05-17 20:53:39 -07002071 threshold += threshold << 1;
2072 threshold >>= 2;
2073
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002074 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
Mat Martineau0a0aba42012-05-17 20:53:39 -07002075 threshold);
2076
2077 if (frames_to_ack >= threshold) {
2078 __clear_ack_timer(chan);
2079 control.super = L2CAP_SUPER_RR;
2080 control.reqseq = chan->buffer_seq;
2081 l2cap_send_sframe(chan, &control);
2082 frames_to_ack = 0;
2083 }
2084
2085 if (frames_to_ack)
2086 __set_ack_timer(chan);
2087 }
Szymon Jancb17e73b2012-01-11 10:59:47 +01002088}
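/* The threshold arithmetic in l2cap_send_ack() computes 3/4 of the
 * ack window using only shifts and adds:
 *
 *	threshold  = ack_win;          // w
 *	threshold += threshold << 1;   // w + 2w = 3w
 *	threshold >>= 2;               // 3w / 4
 *
 * For example ack_win = 63 gives threshold = 47, so an acknowledgement
 * is forced once 47 or more frames are pending; below that the ack
 * timer is armed instead.
 */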
2089
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002090static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2091 struct msghdr *msg, int len,
2092 int count, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002093{
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002094 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002095 struct sk_buff **frag;
Gustavo Padovan90338942012-04-06 20:15:47 -03002096 int sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002097
Jukka Rissanen04988782014-06-18 16:37:07 +03002098 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2099 msg->msg_iov, count))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002100 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002101
2102 sent += count;
2103 len -= count;
2104
2105 /* Continuation fragments (no L2CAP header) */
2106 frag = &skb_shinfo(skb)->frag_list;
2107 while (len) {
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002108 struct sk_buff *tmp;
2109
Linus Torvalds1da177e2005-04-16 15:20:36 -07002110 count = min_t(unsigned int, conn->mtu, len);
2111
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002112 tmp = chan->ops->alloc_skb(chan, 0, count,
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002113 msg->msg_flags & MSG_DONTWAIT);
2114 if (IS_ERR(tmp))
2115 return PTR_ERR(tmp);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002116
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002117 *frag = tmp;
2118
Jukka Rissanen04988782014-06-18 16:37:07 +03002119 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2120 msg->msg_iov, count))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002121 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002122
2123 sent += count;
2124 len -= count;
2125
Gustavo Padovan2d0ed3d2012-05-11 13:16:12 -03002126 skb->len += (*frag)->len;
2127 skb->data_len += (*frag)->len;
2128
Linus Torvalds1da177e2005-04-16 15:20:36 -07002129 frag = &(*frag)->next;
2130 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002131
2132 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002133}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002134
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002135static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002136 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002137{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002138 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002139 struct sk_buff *skb;
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002140 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002141 struct l2cap_hdr *lh;
2142
Marcel Holtmann8d463212014-06-05 15:22:51 +02002143 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2144 __le16_to_cpu(chan->psm), len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002145
2146 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002147
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002148 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002149 msg->msg_flags & MSG_DONTWAIT);
2150 if (IS_ERR(skb))
2151 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002152
2153 /* Create L2CAP header */
2154 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002155 lh->cid = cpu_to_le16(chan->dcid);
Andrei Emeltchenkodaf6a78c2012-05-03 10:55:52 +03002156 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
Marcel Holtmann43b1b8d2013-10-12 06:01:26 -07002157 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002158
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002159 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002160 if (unlikely(err < 0)) {
2161 kfree_skb(skb);
2162 return ERR_PTR(err);
2163 }
2164 return skb;
2165}
2166
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002167static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002168 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002169{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002170 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002171 struct sk_buff *skb;
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002172 int err, count;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002173 struct l2cap_hdr *lh;
2174
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002175 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002176
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002177 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002178
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002179 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002180 msg->msg_flags & MSG_DONTWAIT);
2181 if (IS_ERR(skb))
2182 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002183
2184 /* Create L2CAP header */
2185 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002186 lh->cid = cpu_to_le16(chan->dcid);
Gustavo Padovan6ff9b5e2012-05-02 11:56:17 -03002187 lh->len = cpu_to_le16(len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002188
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002189 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002190 if (unlikely(err < 0)) {
2191 kfree_skb(skb);
2192 return ERR_PTR(err);
2193 }
2194 return skb;
2195}
2196
Luiz Augusto von Dentzab0ff762011-09-12 20:00:50 +03002197static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002198 struct msghdr *msg, size_t len,
2199 u16 sdulen)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002200{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002201 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002202 struct sk_buff *skb;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002203 int err, count, hlen;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002204 struct l2cap_hdr *lh;
2205
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002206 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002207
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03002208 if (!conn)
2209 return ERR_PTR(-ENOTCONN);
2210
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002211 hlen = __ertm_hdr_size(chan);
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002212
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002213 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002214 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002215
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03002216 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002217 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03002218
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002219 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002220
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002221 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002222 msg->msg_flags & MSG_DONTWAIT);
2223 if (IS_ERR(skb))
2224 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002225
2226 /* Create L2CAP header */
2227 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002228 lh->cid = cpu_to_le16(chan->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002229 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002230
Mat Martineau18a48e72012-05-17 20:53:34 -07002231 /* Control header is populated later */
2232 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2233 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2234 else
2235 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002236
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002237 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002238 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002239
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002240 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002241 if (unlikely(err < 0)) {
2242 kfree_skb(skb);
2243 return ERR_PTR(err);
2244 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03002245
Mat Martineau18a48e72012-05-17 20:53:34 -07002246 bt_cb(skb)->control.fcs = chan->fcs;
Mat Martineau3ce35142012-04-25 16:36:14 -07002247 bt_cb(skb)->control.retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002248 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249}
2250
Mat Martineau94122bb2012-05-02 09:42:02 -07002251static int l2cap_segment_sdu(struct l2cap_chan *chan,
2252 struct sk_buff_head *seg_queue,
2253 struct msghdr *msg, size_t len)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002254{
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002255 struct sk_buff *skb;
Mat Martineau94122bb2012-05-02 09:42:02 -07002256 u16 sdu_len;
2257 size_t pdu_len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002258 u8 sar;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002259
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002260 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002261
Mat Martineau94122bb2012-05-02 09:42:02 -07002262 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2263 * so fragmented skbs are not used. The HCI layer's handling
2264 * of fragmented skbs is not compatible with ERTM's queueing.
2265 */
2266
2267 /* PDU size is derived from the HCI MTU */
2268 pdu_len = chan->conn->mtu;
2269
Mat Martineaua5495742012-10-23 15:24:21 -07002270 /* Constrain PDU size for BR/EDR connections */
2271 if (!chan->hs_hcon)
2272 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
Mat Martineau94122bb2012-05-02 09:42:02 -07002273
2274 /* Adjust for largest possible L2CAP overhead. */
Gustavo Padovan35d401d2012-05-25 18:57:05 -03002275 if (chan->fcs)
2276 pdu_len -= L2CAP_FCS_SIZE;
2277
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002278 pdu_len -= __ertm_hdr_size(chan);
Mat Martineau94122bb2012-05-02 09:42:02 -07002279
2280 /* Remote device may have requested smaller PDUs */
2281 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2282
2283 if (len <= pdu_len) {
2284 sar = L2CAP_SAR_UNSEGMENTED;
2285 sdu_len = 0;
2286 pdu_len = len;
2287 } else {
2288 sar = L2CAP_SAR_START;
2289 sdu_len = len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002290 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002291
2292 while (len > 0) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002293 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002294
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002295 if (IS_ERR(skb)) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002296 __skb_queue_purge(seg_queue);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002297 return PTR_ERR(skb);
2298 }
2299
Mat Martineau94122bb2012-05-02 09:42:02 -07002300 bt_cb(skb)->control.sar = sar;
2301 __skb_queue_tail(seg_queue, skb);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002302
Mat Martineau94122bb2012-05-02 09:42:02 -07002303 len -= pdu_len;
Lukasz Rymanowski069cb272014-08-13 16:01:41 +02002304 if (sdu_len)
Mat Martineau94122bb2012-05-02 09:42:02 -07002305 sdu_len = 0;
Mat Martineau94122bb2012-05-02 09:42:02 -07002306
2307 if (len <= pdu_len) {
2308 sar = L2CAP_SAR_END;
2309 pdu_len = len;
2310 } else {
2311 sar = L2CAP_SAR_CONTINUE;
2312 }
2313 }
2314
Gustavo Padovanf0f62792012-05-29 13:29:17 -03002315 return 0;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002316}
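/* Segmentation example for the ERTM/streaming path above (sizes are
 * illustrative): a 1200 byte SDU with an effective PDU payload of 512
 * bytes becomes three I-frames,
 *
 *	SAR_START     512 bytes (plus the 2 byte SDU length field)
 *	SAR_CONTINUE  512 bytes
 *	SAR_END       176 bytes
 *
 * Only the first PDU carries the SDU length, and anything that fits
 * in a single PDU goes out as SAR_UNSEGMENTED.
 */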
2317
Johan Hedberg177f8f22013-05-31 17:54:51 +03002318static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2319 struct msghdr *msg,
2320 size_t len, u16 sdulen)
2321{
2322 struct l2cap_conn *conn = chan->conn;
2323 struct sk_buff *skb;
2324 int err, count, hlen;
2325 struct l2cap_hdr *lh;
2326
2327 BT_DBG("chan %p len %zu", chan, len);
2328
2329 if (!conn)
2330 return ERR_PTR(-ENOTCONN);
2331
2332 hlen = L2CAP_HDR_SIZE;
2333
2334 if (sdulen)
2335 hlen += L2CAP_SDULEN_SIZE;
2336
2337 count = min_t(unsigned int, (conn->mtu - hlen), len);
2338
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002339 skb = chan->ops->alloc_skb(chan, hlen, count,
Johan Hedberg177f8f22013-05-31 17:54:51 +03002340 msg->msg_flags & MSG_DONTWAIT);
2341 if (IS_ERR(skb))
2342 return skb;
2343
2344 /* Create L2CAP header */
2345 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2346 lh->cid = cpu_to_le16(chan->dcid);
2347 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2348
2349 if (sdulen)
2350 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2351
2352 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2353 if (unlikely(err < 0)) {
2354 kfree_skb(skb);
2355 return ERR_PTR(err);
2356 }
2357
2358 return skb;
2359}
2360
2361static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2362 struct sk_buff_head *seg_queue,
2363 struct msghdr *msg, size_t len)
2364{
2365 struct sk_buff *skb;
2366 size_t pdu_len;
2367 u16 sdu_len;
2368
2369 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2370
Johan Hedberg177f8f22013-05-31 17:54:51 +03002371 sdu_len = len;
Johan Hedberg72c6fb92014-08-15 21:06:51 +03002372 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
Johan Hedberg177f8f22013-05-31 17:54:51 +03002373
2374 while (len > 0) {
2375 if (len <= pdu_len)
2376 pdu_len = len;
2377
2378 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2379 if (IS_ERR(skb)) {
2380 __skb_queue_purge(seg_queue);
2381 return PTR_ERR(skb);
2382 }
2383
2384 __skb_queue_tail(seg_queue, skb);
2385
2386 len -= pdu_len;
2387
2388 if (sdu_len) {
2389 sdu_len = 0;
2390 pdu_len += L2CAP_SDULEN_SIZE;
2391 }
2392 }
2393
2394 return 0;
2395}
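/* LE credit based segmentation example (illustrative numbers): with a
 * remote MPS of 27, a 100 byte SDU is split as
 *
 *	PDU 1:  2 byte SDU length + 25 bytes of data
 *	PDU 2: 27 bytes of data
 *	PDU 3: 27 bytes of data
 *	PDU 4: 21 bytes of data
 *
 * Only the first PDU carries the SDU length field, which is why
 * pdu_len grows by L2CAP_SDULEN_SIZE inside the loop above.
 */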
2396
Marcel Holtmann8d463212014-06-05 15:22:51 +02002397int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002398{
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002399 struct sk_buff *skb;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002400 int err;
Mat Martineau94122bb2012-05-02 09:42:02 -07002401 struct sk_buff_head seg_queue;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002402
Seung-Woo Kim31e8ce82013-11-05 18:46:33 +09002403 if (!chan->conn)
2404 return -ENOTCONN;
2405
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002406 /* Connectionless channel */
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002407 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
Marcel Holtmann8d463212014-06-05 15:22:51 +02002408 skb = l2cap_create_connless_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002409 if (IS_ERR(skb))
2410 return PTR_ERR(skb);
2411
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002412 /* Channel lock is released before requesting new skb and then
2413 * reacquired thus we need to recheck channel state.
2414 */
2415 if (chan->state != BT_CONNECTED) {
2416 kfree_skb(skb);
2417 return -ENOTCONN;
2418 }
2419
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002420 l2cap_do_send(chan, skb);
2421 return len;
2422 }
2423
2424 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03002425 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedberg177f8f22013-05-31 17:54:51 +03002426 /* Check outgoing MTU */
2427 if (len > chan->omtu)
2428 return -EMSGSIZE;
2429
Johan Hedbergfad5fc82013-12-05 09:45:01 +02002430 if (!chan->tx_credits)
2431 return -EAGAIN;
2432
Johan Hedberg177f8f22013-05-31 17:54:51 +03002433 __skb_queue_head_init(&seg_queue);
2434
2435 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2436
2437 if (chan->state != BT_CONNECTED) {
2438 __skb_queue_purge(&seg_queue);
2439 err = -ENOTCONN;
2440 }
2441
2442 if (err)
2443 return err;
2444
2445 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2446
2447 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2448 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2449 chan->tx_credits--;
2450 }
2451
2452 if (!chan->tx_credits)
2453 chan->ops->suspend(chan);
2454
2455 err = len;
2456
2457 break;
2458
Johan Hedbergfad5fc82013-12-05 09:45:01 +02002459 case L2CAP_MODE_BASIC:
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002460 /* Check outgoing MTU */
2461 if (len > chan->omtu)
2462 return -EMSGSIZE;
2463
2464 /* Create a basic PDU */
Marcel Holtmann8d463212014-06-05 15:22:51 +02002465 skb = l2cap_create_basic_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002466 if (IS_ERR(skb))
2467 return PTR_ERR(skb);
2468
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002469 /* Channel lock is released before requesting new skb and then
2470 * reacquired thus we need to recheck channel state.
2471 */
2472 if (chan->state != BT_CONNECTED) {
2473 kfree_skb(skb);
2474 return -ENOTCONN;
2475 }
2476
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002477 l2cap_do_send(chan, skb);
2478 err = len;
2479 break;
2480
2481 case L2CAP_MODE_ERTM:
2482 case L2CAP_MODE_STREAMING:
Mat Martineau94122bb2012-05-02 09:42:02 -07002483 /* Check outgoing MTU */
2484 if (len > chan->omtu) {
2485 err = -EMSGSIZE;
2486 break;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002487 }
2488
Mat Martineau94122bb2012-05-02 09:42:02 -07002489 __skb_queue_head_init(&seg_queue);
2490
2491 /* Do segmentation before calling in to the state machine,
2492 * since it's possible to block while waiting for memory
2493 * allocation.
2494 */
2495 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2496
2497 /* The channel could have been closed while segmenting,
2498 * check that it is still connected.
2499 */
2500 if (chan->state != BT_CONNECTED) {
2501 __skb_queue_purge(&seg_queue);
2502 err = -ENOTCONN;
2503 }
2504
2505 if (err)
2506 break;
2507
Mat Martineau37339372012-05-17 20:53:33 -07002508 if (chan->mode == L2CAP_MODE_ERTM)
Gustavo Padovand6603662012-05-21 13:58:22 -03002509 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
Mat Martineau37339372012-05-17 20:53:33 -07002510 else
Gustavo Padovand6603662012-05-21 13:58:22 -03002511 l2cap_streaming_send(chan, &seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002512
Gustavo Padovand6603662012-05-21 13:58:22 -03002513 err = len;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002514
Mat Martineau94122bb2012-05-02 09:42:02 -07002515 /* If the skbs were not queued for sending, they'll still be in
2516 * seg_queue and need to be purged.
2517 */
2518 __skb_queue_purge(&seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002519 break;
2520
2521 default:
2522		BT_DBG("bad mode %1.1x", chan->mode);
2523 err = -EBADFD;
2524 }
2525
2526 return err;
2527}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03002528EXPORT_SYMBOL_GPL(l2cap_chan_send);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002529
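/* Request retransmission of each missing I-frame between the next
 * expected tx sequence number and txseq. Frames already buffered in
 * srej_q are skipped; every requested sequence number is recorded in
 * srej_list.
 */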
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002530static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2531{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002532 struct l2cap_ctrl control;
2533 u16 seq;
2534
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002535 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002536
2537 memset(&control, 0, sizeof(control));
2538 control.sframe = 1;
2539 control.super = L2CAP_SUPER_SREJ;
2540
2541 for (seq = chan->expected_tx_seq; seq != txseq;
2542 seq = __next_seq(chan, seq)) {
2543 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2544 control.reqseq = seq;
2545 l2cap_send_sframe(chan, &control);
2546 l2cap_seq_list_append(&chan->srej_list, seq);
2547 }
2548 }
2549
2550 chan->expected_tx_seq = __next_seq(chan, txseq);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002551}
2552
2553static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2554{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002555 struct l2cap_ctrl control;
2556
2557 BT_DBG("chan %p", chan);
2558
2559 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2560 return;
2561
2562 memset(&control, 0, sizeof(control));
2563 control.sframe = 1;
2564 control.super = L2CAP_SUPER_SREJ;
2565 control.reqseq = chan->srej_list.tail;
2566 l2cap_send_sframe(chan, &control);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002567}
2568
2569static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2570{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002571 struct l2cap_ctrl control;
2572 u16 initial_head;
2573 u16 seq;
2574
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002575 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002576
2577 memset(&control, 0, sizeof(control));
2578 control.sframe = 1;
2579 control.super = L2CAP_SUPER_SREJ;
2580
2581 /* Capture initial list head to allow only one pass through the list. */
2582 initial_head = chan->srej_list.head;
2583
2584 do {
2585 seq = l2cap_seq_list_pop(&chan->srej_list);
2586 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2587 break;
2588
2589 control.reqseq = seq;
2590 l2cap_send_sframe(chan, &control);
2591 l2cap_seq_list_append(&chan->srej_list, seq);
2592 } while (chan->srej_list.head != initial_head);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002593}
2594
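/* Remove all frames acknowledged by reqseq from the transmit queue and
 * stop the retransmission timer once no unacked frames remain.
 */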
Mat Martineau608bcc62012-05-17 20:53:32 -07002595static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2596{
2597 struct sk_buff *acked_skb;
2598 u16 ackseq;
2599
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002600 BT_DBG("chan %p, reqseq %u", chan, reqseq);
Mat Martineau608bcc62012-05-17 20:53:32 -07002601
2602 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2603 return;
2604
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002605 BT_DBG("expected_ack_seq %u, unacked_frames %u",
Mat Martineau608bcc62012-05-17 20:53:32 -07002606 chan->expected_ack_seq, chan->unacked_frames);
2607
2608 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2609 ackseq = __next_seq(chan, ackseq)) {
2610
2611 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2612 if (acked_skb) {
2613 skb_unlink(acked_skb, &chan->tx_q);
2614 kfree_skb(acked_skb);
2615 chan->unacked_frames--;
2616 }
2617 }
2618
2619 chan->expected_ack_seq = reqseq;
2620
2621 if (chan->unacked_frames == 0)
2622 __clear_retrans_timer(chan);
2623
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002624 BT_DBG("unacked_frames %u", chan->unacked_frames);
Mat Martineau608bcc62012-05-17 20:53:32 -07002625}
2626
2627static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2628{
2629 BT_DBG("chan %p", chan);
2630
2631 chan->expected_tx_seq = chan->buffer_seq;
2632 l2cap_seq_list_clear(&chan->srej_list);
2633 skb_queue_purge(&chan->srej_q);
2634 chan->rx_state = L2CAP_RX_STATE_RECV;
2635}
2636
Gustavo Padovand6603662012-05-21 13:58:22 -03002637static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2638 struct l2cap_ctrl *control,
2639 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002640{
Mat Martineau608bcc62012-05-17 20:53:32 -07002641 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2642 event);
2643
2644 switch (event) {
2645 case L2CAP_EV_DATA_REQUEST:
2646 if (chan->tx_send_head == NULL)
2647 chan->tx_send_head = skb_peek(skbs);
2648
2649 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2650 l2cap_ertm_send(chan);
2651 break;
2652 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2653 BT_DBG("Enter LOCAL_BUSY");
2654 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2655
2656 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2657 /* The SREJ_SENT state must be aborted if we are to
2658 * enter the LOCAL_BUSY state.
2659 */
2660 l2cap_abort_rx_srej_sent(chan);
2661 }
2662
2663 l2cap_send_ack(chan);
2664
2665 break;
2666 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2667 BT_DBG("Exit LOCAL_BUSY");
2668 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2669
2670 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2671 struct l2cap_ctrl local_control;
2672
2673 memset(&local_control, 0, sizeof(local_control));
2674 local_control.sframe = 1;
2675 local_control.super = L2CAP_SUPER_RR;
2676 local_control.poll = 1;
2677 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002678 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002679
2680 chan->retry_count = 1;
2681 __set_monitor_timer(chan);
2682 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2683 }
2684 break;
2685 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2686 l2cap_process_reqseq(chan, control->reqseq);
2687 break;
2688 case L2CAP_EV_EXPLICIT_POLL:
2689 l2cap_send_rr_or_rnr(chan, 1);
2690 chan->retry_count = 1;
2691 __set_monitor_timer(chan);
2692 __clear_ack_timer(chan);
2693 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2694 break;
2695 case L2CAP_EV_RETRANS_TO:
2696 l2cap_send_rr_or_rnr(chan, 1);
2697 chan->retry_count = 1;
2698 __set_monitor_timer(chan);
2699 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2700 break;
2701 case L2CAP_EV_RECV_FBIT:
2702 /* Nothing to process */
2703 break;
2704 default:
2705 break;
2706 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002707}
2708
Gustavo Padovand6603662012-05-21 13:58:22 -03002709static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2710 struct l2cap_ctrl *control,
2711 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002712{
Mat Martineau608bcc62012-05-17 20:53:32 -07002713 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2714 event);
2715
2716 switch (event) {
2717 case L2CAP_EV_DATA_REQUEST:
2718 if (chan->tx_send_head == NULL)
2719 chan->tx_send_head = skb_peek(skbs);
2720 /* Queue data, but don't send. */
2721 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2722 break;
2723 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2724 BT_DBG("Enter LOCAL_BUSY");
2725 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2726
2727 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2728 /* The SREJ_SENT state must be aborted if we are to
2729 * enter the LOCAL_BUSY state.
2730 */
2731 l2cap_abort_rx_srej_sent(chan);
2732 }
2733
2734 l2cap_send_ack(chan);
2735
2736 break;
2737 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2738 BT_DBG("Exit LOCAL_BUSY");
2739 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2740
2741 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2742 struct l2cap_ctrl local_control;
2743 memset(&local_control, 0, sizeof(local_control));
2744 local_control.sframe = 1;
2745 local_control.super = L2CAP_SUPER_RR;
2746 local_control.poll = 1;
2747 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002748 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002749
2750 chan->retry_count = 1;
2751 __set_monitor_timer(chan);
2752 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2753 }
2754 break;
2755 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2756 l2cap_process_reqseq(chan, control->reqseq);
2757
2758 /* Fall through */
2759
2760 case L2CAP_EV_RECV_FBIT:
2761 if (control && control->final) {
2762 __clear_monitor_timer(chan);
2763 if (chan->unacked_frames > 0)
2764 __set_retrans_timer(chan);
2765 chan->retry_count = 0;
2766 chan->tx_state = L2CAP_TX_STATE_XMIT;
2767			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2768 }
2769 break;
2770 case L2CAP_EV_EXPLICIT_POLL:
2771 /* Ignore */
2772 break;
2773 case L2CAP_EV_MONITOR_TO:
2774 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2775 l2cap_send_rr_or_rnr(chan, 1);
2776 __set_monitor_timer(chan);
2777 chan->retry_count++;
2778 } else {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02002779 l2cap_send_disconn_req(chan, ECONNABORTED);
Mat Martineau608bcc62012-05-17 20:53:32 -07002780 }
2781 break;
2782 default:
2783 break;
2784 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002785}
2786
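/* ERTM transmit state machine entry point: dispatch the event to the
 * handler for the current transmit state (XMIT or WAIT_F).
 */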
Gustavo Padovand6603662012-05-21 13:58:22 -03002787static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2788 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002789{
Mat Martineau608bcc62012-05-17 20:53:32 -07002790 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2791 chan, control, skbs, event, chan->tx_state);
2792
2793 switch (chan->tx_state) {
2794 case L2CAP_TX_STATE_XMIT:
Gustavo Padovand6603662012-05-21 13:58:22 -03002795 l2cap_tx_state_xmit(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002796 break;
2797 case L2CAP_TX_STATE_WAIT_F:
Gustavo Padovand6603662012-05-21 13:58:22 -03002798 l2cap_tx_state_wait_f(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002799 break;
2800 default:
2801 /* Ignore event */
2802 break;
2803 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002804}
2805
Mat Martineau4b51dae92012-05-17 20:53:37 -07002806static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2807 struct l2cap_ctrl *control)
2808{
2809 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002810 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
Mat Martineau4b51dae92012-05-17 20:53:37 -07002811}
2812
Mat Martineauf80842a2012-05-17 20:53:46 -07002813static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2814 struct l2cap_ctrl *control)
2815{
2816 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002817 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
Mat Martineauf80842a2012-05-17 20:53:46 -07002818}
2819
Linus Torvalds1da177e2005-04-16 15:20:36 -07002820/* Copy frame to all raw sockets on that connection */
2821static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2822{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 struct sk_buff *nskb;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03002824 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002825
2826 BT_DBG("conn %p", conn);
2827
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002828 mutex_lock(&conn->chan_lock);
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002829
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002830 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002831 if (chan->chan_type != L2CAP_CHAN_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002832 continue;
2833
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002834 /* Don't send frame to the channel it came from */
2835 if (bt_cb(skb)->chan == chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 continue;
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002837
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002838 nskb = skb_clone(skb, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03002839 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840 continue;
Gustavo Padovan80b98022012-05-27 22:27:51 -03002841 if (chan->ops->recv(chan, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002842 kfree_skb(nskb);
2843 }
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002844
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002845 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002846}
2847
2848/* ---- L2CAP signalling commands ---- */
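/* Build a signalling command PDU. The first skb carries the L2CAP and
 * command headers; any payload beyond conn->mtu is split into
 * continuation fragments chained on frag_list.
 */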
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002849static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2850 u8 ident, u16 dlen, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002851{
2852 struct sk_buff *skb, **frag;
2853 struct l2cap_cmd_hdr *cmd;
2854 struct l2cap_hdr *lh;
2855 int len, count;
2856
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002857 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2858 conn, code, ident, dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002859
Anderson Lizardo300b9622013-06-02 16:30:40 -04002860 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2861 return NULL;
2862
Linus Torvalds1da177e2005-04-16 15:20:36 -07002863 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2864 count = min_t(unsigned int, conn->mtu, len);
2865
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002866 skb = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002867 if (!skb)
2868 return NULL;
2869
2870 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002871 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002872
2873 if (conn->hcon->type == LE_LINK)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002874 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002875 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002876 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877
2878 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2879 cmd->code = code;
2880 cmd->ident = ident;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002881 cmd->len = cpu_to_le16(dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002882
2883 if (dlen) {
2884 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2885 memcpy(skb_put(skb, count), data, count);
2886 data += count;
2887 }
2888
2889 len -= skb->len;
2890
2891 /* Continuation fragments (no L2CAP header) */
2892 frag = &skb_shinfo(skb)->frag_list;
2893 while (len) {
2894 count = min_t(unsigned int, conn->mtu, len);
2895
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002896 *frag = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002897 if (!*frag)
2898 goto fail;
2899
2900 memcpy(skb_put(*frag, count), data, count);
2901
2902 len -= count;
2903 data += count;
2904
2905 frag = &(*frag)->next;
2906 }
2907
2908 return skb;
2909
2910fail:
2911 kfree_skb(skb);
2912 return NULL;
2913}
2914
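/* Parse a single type/length/value configuration option at *ptr and
 * advance the pointer past it. One, two and four byte values are
 * returned directly; longer options are returned as a pointer to the
 * option data.
 */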
Gustavo Padovan2d792812012-10-06 10:07:01 +01002915static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2916 unsigned long *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917{
2918 struct l2cap_conf_opt *opt = *ptr;
2919 int len;
2920
2921 len = L2CAP_CONF_OPT_SIZE + opt->len;
2922 *ptr += len;
2923
2924 *type = opt->type;
2925 *olen = opt->len;
2926
2927 switch (opt->len) {
2928 case 1:
2929 *val = *((u8 *) opt->val);
2930 break;
2931
2932 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002933 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002934 break;
2935
2936 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002937 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002938 break;
2939
2940 default:
2941 *val = (unsigned long) opt->val;
2942 break;
2943 }
2944
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002945 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002946 return len;
2947}
2948
Linus Torvalds1da177e2005-04-16 15:20:36 -07002949static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2950{
2951 struct l2cap_conf_opt *opt = *ptr;
2952
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002953 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002954
2955 opt->type = type;
2956 opt->len = len;
2957
2958 switch (len) {
2959 case 1:
2960 *((u8 *) opt->val) = val;
2961 break;
2962
2963 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002964 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002965 break;
2966
2967 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002968 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969 break;
2970
2971 default:
2972 memcpy(opt->val, (void *) val, len);
2973 break;
2974 }
2975
2976 *ptr += L2CAP_CONF_OPT_SIZE + len;
2977}
2978
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002979static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2980{
2981 struct l2cap_conf_efs efs;
2982
Szymon Janc1ec918c2011-11-16 09:32:21 +01002983 switch (chan->mode) {
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002984 case L2CAP_MODE_ERTM:
2985 efs.id = chan->local_id;
2986 efs.stype = chan->local_stype;
2987 efs.msdu = cpu_to_le16(chan->local_msdu);
2988 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002989 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2990 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002991 break;
2992
2993 case L2CAP_MODE_STREAMING:
2994 efs.id = 1;
2995 efs.stype = L2CAP_SERV_BESTEFFORT;
2996 efs.msdu = cpu_to_le16(chan->local_msdu);
2997 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2998 efs.acc_lat = 0;
2999 efs.flush_to = 0;
3000 break;
3001
3002 default:
3003 return;
3004 }
3005
3006 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
Andrei Emeltchenko8936fa62012-10-08 11:14:41 +03003007 (unsigned long) &efs);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003008}
3009
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003010static void l2cap_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003011{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003012 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau03625202012-05-17 20:53:51 -07003013 ack_timer.work);
3014 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003015
Gustavo F. Padovan2fb9b3d2011-12-22 16:56:05 -02003016 BT_DBG("chan %p", chan);
3017
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003018 l2cap_chan_lock(chan);
3019
Mat Martineau03625202012-05-17 20:53:51 -07003020 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3021 chan->last_acked_seq);
3022
3023 if (frames_to_ack)
3024 l2cap_send_rr_or_rnr(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003025
3026 l2cap_chan_unlock(chan);
Szymon Janc09bfb2e2012-01-11 10:59:49 +01003027 l2cap_chan_put(chan);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003028}
3029
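/* Reset sequence numbers, queues and AMP move state for a newly
 * configured channel. For ERTM mode this also initializes the
 * retransmission, monitor and ack timers and allocates the SREJ and
 * retransmit sequence lists.
 */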
Andrei Emeltchenko466f8002012-05-29 13:59:01 +03003030int l2cap_ertm_init(struct l2cap_chan *chan)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003031{
Mat Martineau3c588192012-04-11 10:48:42 -07003032 int err;
3033
Mat Martineau105bdf92012-04-27 16:50:48 -07003034 chan->next_tx_seq = 0;
3035 chan->expected_tx_seq = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003036 chan->expected_ack_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003037 chan->unacked_frames = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003038 chan->buffer_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003039 chan->frames_sent = 0;
Mat Martineau105bdf92012-04-27 16:50:48 -07003040 chan->last_acked_seq = 0;
3041 chan->sdu = NULL;
3042 chan->sdu_last_frag = NULL;
3043 chan->sdu_len = 0;
3044
Mat Martineaud34c34f2012-05-14 14:49:27 -07003045 skb_queue_head_init(&chan->tx_q);
3046
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003047 chan->local_amp_id = AMP_ID_BREDR;
3048 chan->move_id = AMP_ID_BREDR;
Mat Martineau08333282012-10-23 15:24:06 -07003049 chan->move_state = L2CAP_MOVE_STABLE;
3050 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3051
Mat Martineau105bdf92012-04-27 16:50:48 -07003052 if (chan->mode != L2CAP_MODE_ERTM)
3053 return 0;
3054
3055 chan->rx_state = L2CAP_RX_STATE_RECV;
3056 chan->tx_state = L2CAP_TX_STATE_XMIT;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003057
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003058 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3059 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3060 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003061
Gustavo F. Padovanf1c67752011-03-25 20:36:10 -03003062 skb_queue_head_init(&chan->srej_q);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03003063
Mat Martineau3c588192012-04-11 10:48:42 -07003064 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3065 if (err < 0)
3066 return err;
3067
Mat Martineau9dc9aff2012-05-17 16:20:14 -07003068 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3069 if (err < 0)
3070 l2cap_seq_list_free(&chan->srej_list);
3071
3072 return err;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003073}
3074
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003075static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3076{
3077 switch (mode) {
3078 case L2CAP_MODE_STREAMING:
3079 case L2CAP_MODE_ERTM:
3080 if (l2cap_mode_supported(mode, remote_feat_mask))
3081 return mode;
3082 /* fall through */
3083 default:
3084 return L2CAP_MODE_BASIC;
3085 }
3086}
3087
Marcel Holtmann848566b2013-10-01 22:59:22 -07003088static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003089{
Marcel Holtmann848566b2013-10-01 22:59:22 -07003090 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003091}
3092
Marcel Holtmann848566b2013-10-01 22:59:22 -07003093static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003094{
Marcel Holtmann848566b2013-10-01 22:59:22 -07003095 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003096}
3097
Mat Martineau36c86c82012-10-23 15:24:20 -07003098static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3099 struct l2cap_conf_rfc *rfc)
3100{
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003101 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
Mat Martineau36c86c82012-10-23 15:24:20 -07003102 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3103
3104		/* Class 1 devices must have ERTM timeouts
3105 * exceeding the Link Supervision Timeout. The
3106 * default Link Supervision Timeout for AMP
3107 * controllers is 10 seconds.
3108 *
3109 * Class 1 devices use 0xffffffff for their
3110 * best-effort flush timeout, so the clamping logic
3111 * will result in a timeout that meets the above
3112 * requirement. ERTM timeouts are 16-bit values, so
3113 * the maximum timeout is 65.535 seconds.
3114 */
3115
3116 /* Convert timeout to milliseconds and round */
3117 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3118
3119 /* This is the recommended formula for class 2 devices
3120 * that start ERTM timers when packets are sent to the
3121 * controller.
3122 */
3123 ertm_to = 3 * ertm_to + 500;
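		/* For example, a 100 ms best-effort flush timeout yields an
		 * ERTM timeout of 3 * 100 + 500 = 800 ms before clamping.
		 */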
3124
3125 if (ertm_to > 0xffff)
3126 ertm_to = 0xffff;
3127
3128 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3129 rfc->monitor_timeout = rfc->retrans_timeout;
3130 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003131 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3132 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
Mat Martineau36c86c82012-10-23 15:24:20 -07003133 }
3134}
3135
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003136static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3137{
3138 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
Marcel Holtmann848566b2013-10-01 22:59:22 -07003139 __l2cap_ews_supported(chan->conn)) {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003140 /* use extended control field */
3141 set_bit(FLAG_EXT_CTRL, &chan->flags);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003142 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3143 } else {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003144 chan->tx_win = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003145 L2CAP_DEFAULT_TX_WINDOW);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003146 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3147 }
Mat Martineauc20f8e32012-07-10 05:47:07 -07003148 chan->ack_win = chan->tx_win;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003149}
3150
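/* Build our configuration request for the channel: the MTU option plus
 * mode specific RFC, FCS, EFS and extended window size options.
 */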
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003151static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003152{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153 struct l2cap_conf_req *req = data;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003154 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003155 void *ptr = req->data;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003156 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003157
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003158 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003159
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003160 if (chan->num_conf_req || chan->num_conf_rsp)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003161 goto done;
3162
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003163 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003164 case L2CAP_MODE_STREAMING:
3165 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003166 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003167 break;
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003168
Marcel Holtmann848566b2013-10-01 22:59:22 -07003169 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003170 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3171
Gustavo F. Padovan2ba13ed2010-06-09 16:39:05 -03003172 /* fall through */
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003173 default:
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003174 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003175 break;
3176 }
3177
3178done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003179 if (chan->imtu != L2CAP_DEFAULT_MTU)
3180 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovan7990681c2011-01-24 16:01:43 -02003181
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003182 switch (chan->mode) {
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003183 case L2CAP_MODE_BASIC:
Marcel Holtmann6fea7ad2014-07-09 11:53:35 +02003184 if (disable_ertm)
3185 break;
3186
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003187 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003188 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003189 break;
3190
Gustavo F. Padovan62547752010-06-08 20:05:31 -03003191 rfc.mode = L2CAP_MODE_BASIC;
3192 rfc.txwin_size = 0;
3193 rfc.max_transmit = 0;
3194 rfc.retrans_timeout = 0;
3195 rfc.monitor_timeout = 0;
3196 rfc.max_pdu_size = 0;
3197
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003198 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003199 (unsigned long) &rfc);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003200 break;
3201
3202 case L2CAP_MODE_ERTM:
3203 rfc.mode = L2CAP_MODE_ERTM;
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003204 rfc.max_transmit = chan->max_tx;
Mat Martineau36c86c82012-10-23 15:24:20 -07003205
3206 __l2cap_set_ertm_timeouts(chan, &rfc);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003207
3208 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003209 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3210 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003211 rfc.max_pdu_size = cpu_to_le16(size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003212
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003213 l2cap_txwin_setup(chan);
3214
3215 rfc.txwin_size = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003216 L2CAP_DEFAULT_TX_WINDOW);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003217
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003218 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003219 (unsigned long) &rfc);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003220
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003221 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3222 l2cap_add_opt_efs(&ptr, chan);
3223
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003224 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3225 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003226 chan->tx_win);
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003227
3228 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3229 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003230 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003231 chan->fcs = L2CAP_FCS_NONE;
3232 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3233 chan->fcs);
3234 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003235 break;
3236
3237 case L2CAP_MODE_STREAMING:
Mat Martineau273759e2012-05-17 20:53:53 -07003238 l2cap_txwin_setup(chan);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003239 rfc.mode = L2CAP_MODE_STREAMING;
3240 rfc.txwin_size = 0;
3241 rfc.max_transmit = 0;
3242 rfc.retrans_timeout = 0;
3243 rfc.monitor_timeout = 0;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003244
3245 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003246 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3247 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003248 rfc.max_pdu_size = cpu_to_le16(size);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003249
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003250 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003251 (unsigned long) &rfc);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003252
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003253 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3254 l2cap_add_opt_efs(&ptr, chan);
3255
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003256 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3257 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003258 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003259 chan->fcs = L2CAP_FCS_NONE;
3260 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3261 chan->fcs);
3262 }
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003263 break;
3264 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003265
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003266 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003267 req->flags = cpu_to_le16(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003268
3269 return ptr - data;
3270}
3271
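/* Parse the peer's configuration request stored in chan->conf_req and
 * build our configuration response, accepting or adjusting the MTU,
 * mode, FCS, EFS and extended window size options.
 */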
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003272static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003273{
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003274 struct l2cap_conf_rsp *rsp = data;
3275 void *ptr = rsp->data;
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003276 void *req = chan->conf_req;
3277 int len = chan->conf_len;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003278 int type, hint, olen;
3279 unsigned long val;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003280 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003281 struct l2cap_conf_efs efs;
3282 u8 remote_efs = 0;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003283 u16 mtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003284 u16 result = L2CAP_CONF_SUCCESS;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003285 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003286
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003287 BT_DBG("chan %p", chan);
Marcel Holtmann820ae1b2006-11-18 22:15:00 +01003288
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003289 while (len >= L2CAP_CONF_OPT_SIZE) {
3290 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003291
Gustavo F. Padovan589d2742009-04-20 01:31:07 -03003292 hint = type & L2CAP_CONF_HINT;
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07003293 type &= L2CAP_CONF_MASK;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003294
3295 switch (type) {
3296 case L2CAP_CONF_MTU:
Marcel Holtmann861d6882007-10-20 13:37:06 +02003297 mtu = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003298 break;
3299
3300 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003301 chan->flush_to = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003302 break;
3303
3304 case L2CAP_CONF_QOS:
3305 break;
3306
Marcel Holtmann6464f352007-10-20 13:39:51 +02003307 case L2CAP_CONF_RFC:
3308 if (olen == sizeof(rfc))
3309 memcpy(&rfc, (void *) val, olen);
3310 break;
3311
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003312 case L2CAP_CONF_FCS:
3313 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003314 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003315 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003316
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003317 case L2CAP_CONF_EFS:
3318 remote_efs = 1;
3319 if (olen == sizeof(efs))
3320 memcpy(&efs, (void *) val, olen);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003321 break;
3322
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003323 case L2CAP_CONF_EWS:
Marcel Holtmann848566b2013-10-01 22:59:22 -07003324 if (!chan->conn->hs_enabled)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003325 return -ECONNREFUSED;
3326
3327 set_bit(FLAG_EXT_CTRL, &chan->flags);
3328 set_bit(CONF_EWS_RECV, &chan->conf_state);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003329 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003330 chan->remote_tx_win = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003331 break;
3332
3333 default:
3334 if (hint)
3335 break;
3336
3337 result = L2CAP_CONF_UNKNOWN;
3338 *((u8 *) ptr++) = type;
3339 break;
3340 }
3341 }
3342
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003343 if (chan->num_conf_rsp || chan->num_conf_req > 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003344 goto done;
3345
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003346 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003347 case L2CAP_MODE_STREAMING:
3348 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003349 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003350 chan->mode = l2cap_select_mode(rfc.mode,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003351 chan->conn->feat_mask);
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003352 break;
3353 }
3354
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003355 if (remote_efs) {
Marcel Holtmann848566b2013-10-01 22:59:22 -07003356 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003357 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3358 else
3359 return -ECONNREFUSED;
3360 }
3361
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003362 if (chan->mode != rfc.mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003363 return -ECONNREFUSED;
Gustavo F. Padovan742e5192010-06-08 19:09:48 -03003364
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003365 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003366 }
3367
3368done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003369 if (chan->mode != rfc.mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003370 result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003371 rfc.mode = chan->mode;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003372
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003373 if (chan->num_conf_rsp == 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003374 return -ECONNREFUSED;
3375
Gustavo Padovan2d792812012-10-06 10:07:01 +01003376 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3377 (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003378 }
3379
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003380 if (result == L2CAP_CONF_SUCCESS) {
3381 /* Configure output options and let the other side know
3382 * which ones we don't like. */
3383
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003384 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3385 result = L2CAP_CONF_UNACCEPT;
3386 else {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003387 chan->omtu = mtu;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003388 set_bit(CONF_MTU_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003389 }
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003390 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003391
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003392 if (remote_efs) {
3393 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003394 efs.stype != L2CAP_SERV_NOTRAFIC &&
3395 efs.stype != chan->local_stype) {
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003396
3397 result = L2CAP_CONF_UNACCEPT;
3398
3399 if (chan->num_conf_req >= 1)
3400 return -ECONNREFUSED;
3401
3402 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003403 sizeof(efs),
3404 (unsigned long) &efs);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003405 } else {
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003406 /* Send PENDING Conf Rsp */
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003407 result = L2CAP_CONF_PENDING;
3408 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003409 }
3410 }
3411
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003412 switch (rfc.mode) {
3413 case L2CAP_MODE_BASIC:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003414 chan->fcs = L2CAP_FCS_NONE;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003415 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003416 break;
3417
3418 case L2CAP_MODE_ERTM:
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003419 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3420 chan->remote_tx_win = rfc.txwin_size;
3421 else
3422 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3423
Gustavo F. Padovan2c03a7a2011-03-25 20:15:28 -03003424 chan->remote_max_tx = rfc.max_transmit;
Mat Martineau86b1b262010-08-05 15:54:22 -07003425
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003426 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003427 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3428 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003429 rfc.max_pdu_size = cpu_to_le16(size);
3430 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003431
Mat Martineau36c86c82012-10-23 15:24:20 -07003432 __l2cap_set_ertm_timeouts(chan, &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003433
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003434 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003435
3436 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003437 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003438
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003439 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3440 chan->remote_id = efs.id;
3441 chan->remote_stype = efs.stype;
3442 chan->remote_msdu = le16_to_cpu(efs.msdu);
3443 chan->remote_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003444 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003445 chan->remote_acc_lat =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003446 le32_to_cpu(efs.acc_lat);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003447 chan->remote_sdu_itime =
3448 le32_to_cpu(efs.sdu_itime);
3449 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003450 sizeof(efs),
3451 (unsigned long) &efs);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003452 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003453 break;
3454
3455 case L2CAP_MODE_STREAMING:
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003456 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003457 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3458 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003459 rfc.max_pdu_size = cpu_to_le16(size);
3460 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003461
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003462 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003463
Gustavo Padovan2d792812012-10-06 10:07:01 +01003464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3465 (unsigned long) &rfc);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003466
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003467 break;
3468
3469 default:
Marcel Holtmann6464f352007-10-20 13:39:51 +02003470 result = L2CAP_CONF_UNACCEPT;
3471
3472 memset(&rfc, 0, sizeof(rfc));
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003473 rfc.mode = chan->mode;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003474 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003475
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003476 if (result == L2CAP_CONF_SUCCESS)
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003477 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003478 }
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003479 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003480 rsp->result = cpu_to_le16(result);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003481 rsp->flags = cpu_to_le16(0);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003482
3483 return ptr - data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003484}
3485
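/* Parse the peer's configuration response and build a follow-up request
 * reflecting any adjusted values; returns the number of bytes written
 * to data.
 */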
Gustavo Padovan2d792812012-10-06 10:07:01 +01003486static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3487 void *data, u16 *result)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003488{
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003489 struct l2cap_conf_req *req = data;
3490 void *ptr = req->data;
3491 int type, olen;
3492 unsigned long val;
Mat Martineau36e999a2011-12-08 17:23:21 -08003493 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003494 struct l2cap_conf_efs efs;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003495
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003496 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003497
3498 while (len >= L2CAP_CONF_OPT_SIZE) {
3499 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3500
3501 switch (type) {
3502 case L2CAP_CONF_MTU:
3503 if (val < L2CAP_DEFAULT_MIN_MTU) {
3504 *result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003505 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003506 } else
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003507 chan->imtu = val;
3508 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003509 break;
3510
3511 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003512 chan->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003513 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003514 2, chan->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003515 break;
3516
3517 case L2CAP_CONF_RFC:
3518 if (olen == sizeof(rfc))
3519 memcpy(&rfc, (void *)val, olen);
3520
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003521 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003522 rfc.mode != chan->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003523 return -ECONNREFUSED;
3524
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003525 chan->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003526
3527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003528 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003529 break;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003530
3531 case L2CAP_CONF_EWS:
Mat Martineauc20f8e32012-07-10 05:47:07 -07003532 chan->ack_win = min_t(u16, val, chan->ack_win);
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003533 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Mat Martineauc20f8e32012-07-10 05:47:07 -07003534 chan->tx_win);
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003535 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003536
3537 case L2CAP_CONF_EFS:
3538 if (olen == sizeof(efs))
3539 memcpy(&efs, (void *)val, olen);
3540
3541 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003542 efs.stype != L2CAP_SERV_NOTRAFIC &&
3543 efs.stype != chan->local_stype)
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003544 return -ECONNREFUSED;
3545
Gustavo Padovan2d792812012-10-06 10:07:01 +01003546 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3547 (unsigned long) &efs);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003548 break;
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003549
3550 case L2CAP_CONF_FCS:
3551 if (*result == L2CAP_CONF_PENDING)
3552 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003553 set_bit(CONF_RECV_NO_FCS,
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003554 &chan->conf_state);
3555 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003556 }
3557 }
3558
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003559 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003560 return -ECONNREFUSED;
3561
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003562 chan->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003563
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003564 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003565 switch (rfc.mode) {
3566 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003567 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3568 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3569 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003570 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3571 chan->ack_win = min_t(u16, chan->ack_win,
3572 rfc.txwin_size);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003573
3574 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3575 chan->local_msdu = le16_to_cpu(efs.msdu);
3576 chan->local_sdu_itime =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003577 le32_to_cpu(efs.sdu_itime);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003578 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3579 chan->local_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003580 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003581 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003582 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003583
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003584 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003585 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003586 }
3587 }
3588
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003589 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003590 req->flags = cpu_to_le16(0);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003591
3592 return ptr - data;
3593}
3594
Gustavo Padovan2d792812012-10-06 10:07:01 +01003595static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3596 u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003597{
3598 struct l2cap_conf_rsp *rsp = data;
3599 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003601 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003602
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003603 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003604 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003605 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003606
3607 return ptr - data;
3608}
3609
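/* Send the deferred LE credit based connection response carrying our
 * local CID, MTU, MPS and initial receive credits.
 */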
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003610void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3611{
3612 struct l2cap_le_conn_rsp rsp;
3613 struct l2cap_conn *conn = chan->conn;
3614
3615 BT_DBG("chan %p", chan);
3616
3617 rsp.dcid = cpu_to_le16(chan->scid);
3618 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02003619 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03003620 rsp.credits = cpu_to_le16(chan->rx_credits);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003621 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003622
3623 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3624 &rsp);
3625}
3626
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003627void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003628{
3629 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003630 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003631 u8 buf[128];
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003632 u8 rsp_code;
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003633
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003634 rsp.scid = cpu_to_le16(chan->dcid);
3635 rsp.dcid = cpu_to_le16(chan->scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003636 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3637 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003638
3639 if (chan->hs_hcon)
3640 rsp_code = L2CAP_CREATE_CHAN_RSP;
3641 else
3642 rsp_code = L2CAP_CONN_RSP;
3643
3644 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3645
3646 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003647
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003648 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003649 return;
3650
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003651 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003652 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003653 chan->num_conf_req++;
3654}
3655
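/* Extract the negotiated RFC and extended window size values from a
 * configure response so that the final timeouts, MPS and ack window
 * are applied to the channel.
 */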
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003656static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003657{
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003658 int type, olen;
3659 unsigned long val;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003660 /* Use sane default values in case a misbehaving remote device
3661 * did not send an RFC or extended window size option.
3662 */
3663 u16 txwin_ext = chan->ack_win;
3664 struct l2cap_conf_rfc rfc = {
3665 .mode = chan->mode,
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003666 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3667 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
Mat Martineauc20f8e32012-07-10 05:47:07 -07003668 .max_pdu_size = cpu_to_le16(chan->imtu),
3669 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3670 };
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003671
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003672 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003673
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003674 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003675 return;
3676
3677 while (len >= L2CAP_CONF_OPT_SIZE) {
3678 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3679
Mat Martineauc20f8e32012-07-10 05:47:07 -07003680 switch (type) {
3681 case L2CAP_CONF_RFC:
3682 if (olen == sizeof(rfc))
3683 memcpy(&rfc, (void *)val, olen);
Szymon Janc8f321f82012-06-08 11:33:33 +02003684 break;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003685 case L2CAP_CONF_EWS:
3686 txwin_ext = val;
3687 break;
3688 }
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003689 }
3690
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003691 switch (rfc.mode) {
3692 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003693 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3694 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003695 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3696 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3697 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3698 else
3699 chan->ack_win = min_t(u16, chan->ack_win,
3700 rfc.txwin_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003701 break;
3702 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003703 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003704 }
3705}
3706
Gustavo Padovan2d792812012-10-06 10:07:01 +01003707static inline int l2cap_command_rej(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003708 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3709 u8 *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003710{
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003711 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003712
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003713 if (cmd_len < sizeof(*rej))
3714 return -EPROTO;
3715
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003716 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003717 return 0;
3718
3719 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003720 cmd->ident == conn->info_ident) {
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02003721 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003722
3723 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003724 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003725
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003726 l2cap_conn_start(conn);
3727 }
3728
3729 return 0;
3730}
3731
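/* Common handler for Connection Request and Create Channel Request.
 * Looks up a listening channel for the PSM, performs security checks,
 * creates the new channel and sends the response with the resulting
 * result/status codes. Returns the new channel, or NULL if the request
 * was rejected.
 */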
Mat Martineau17009152012-10-23 15:24:07 -07003732static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3733 struct l2cap_cmd_hdr *cmd,
3734 u8 *data, u8 rsp_code, u8 amp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003735{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3737 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003738 struct l2cap_chan *chan = NULL, *pchan;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003739 int result, status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003740
3741 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003742 __le16 psm = req->psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003743
Andrei Emeltchenko097db762012-03-09 14:16:17 +02003744 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003745
3746	/* Check if we have a socket listening on this PSM */
Marcel Holtmann6f59b902013-10-13 05:24:01 -07003747 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03003748 &conn->hcon->dst, ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003749 if (!pchan) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003750 result = L2CAP_CR_BAD_PSM;
3751 goto sendresp;
3752 }
3753
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003754 mutex_lock(&conn->chan_lock);
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003755 l2cap_chan_lock(pchan);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00003756
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003757 /* Check if the ACL is secure enough (if not SDP) */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003758 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003759 !hci_conn_check_link_mode(conn->hcon)) {
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02003760 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003761 result = L2CAP_CR_SEC_BLOCK;
3762 goto response;
3763 }
3764
Linus Torvalds1da177e2005-04-16 15:20:36 -07003765 result = L2CAP_CR_NO_MEM;
3766
Gustavo Padovan2dfa1002012-05-27 22:27:58 -03003767	/* Check if we already have a channel with that dcid */
3768 if (__l2cap_get_chan_by_dcid(conn, scid))
3769 goto response;
3770
Gustavo Padovan80b98022012-05-27 22:27:51 -03003771 chan = pchan->ops->new_connection(pchan);
Gustavo F. Padovan80808e42011-05-16 17:24:37 -03003772 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773 goto response;
3774
Syam Sidhardhan330b6c12013-08-06 01:59:12 +09003775	/* For certain devices (e.g. a HID mouse), support for authentication,
3776	 * pairing and bonding is optional. For such devices, in order to avoid
3777	 * keeping the ACL alive for too long after L2CAP disconnection, reset
3778	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3779 */
3780 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3781
Marcel Holtmann7eafc592013-10-13 08:12:47 -07003782 bacpy(&chan->src, &conn->hcon->src);
3783 bacpy(&chan->dst, &conn->hcon->dst);
Marcel Holtmann4f1654e2013-10-13 08:50:41 -07003784 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3785 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003786 chan->psm = psm;
3787 chan->dcid = scid;
Mat Martineau17009152012-10-23 15:24:07 -07003788 chan->local_amp_id = amp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003790 __l2cap_chan_add(conn, chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003791
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003792 dcid = chan->scid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003793
Gustavo Padovan8d836d72013-10-15 19:24:47 -03003794 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003795
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003796 chan->ident = cmd->ident;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003797
Marcel Holtmann984947d2009-02-06 23:35:19 +01003798 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03003799 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07003800 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003801 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003802 result = L2CAP_CR_PEND;
3803 status = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08003804 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003805 } else {
Mat Martineau17009152012-10-23 15:24:07 -07003806 /* Force pending result for AMP controllers.
3807 * The connection will succeed after the
3808 * physical link is up.
3809 */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003810 if (amp_id == AMP_ID_BREDR) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003811 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau17009152012-10-23 15:24:07 -07003812 result = L2CAP_CR_SUCCESS;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003813 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003814 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003815 result = L2CAP_CR_PEND;
Mat Martineau17009152012-10-23 15:24:07 -07003816 }
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003817 status = L2CAP_CS_NO_INFO;
3818 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003819 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003820 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003821 result = L2CAP_CR_PEND;
3822 status = L2CAP_CS_AUTHEN_PEND;
3823 }
3824 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003825 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003826 result = L2CAP_CR_PEND;
3827 status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828 }
3829
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830response:
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003831 l2cap_chan_unlock(pchan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003832 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03003833 l2cap_chan_put(pchan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834
3835sendresp:
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003836 rsp.scid = cpu_to_le16(scid);
3837 rsp.dcid = cpu_to_le16(dcid);
3838 rsp.result = cpu_to_le16(result);
3839 rsp.status = cpu_to_le16(status);
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003840 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003841
3842 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3843 struct l2cap_info_req info;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003844 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003845
3846 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3847 conn->info_ident = l2cap_get_ident(conn);
3848
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08003849 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003850
Gustavo Padovan2d792812012-10-06 10:07:01 +01003851 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3852 sizeof(info), &info);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003853 }
3854
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003855 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003856 result == L2CAP_CR_SUCCESS) {
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003857 u8 buf[128];
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003858 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003859 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003860 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003861 chan->num_conf_req++;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003862 }
Mat Martineau17009152012-10-23 15:24:07 -07003863
3864 return chan;
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003865}
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003866
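/* Connection Request handler: report the connected device to the
 * management interface and let l2cap_connect() do the actual channel
 * setup.
 */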
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003867static int l2cap_connect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003868 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003869{
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303870 struct hci_dev *hdev = conn->hcon->hdev;
3871 struct hci_conn *hcon = conn->hcon;
3872
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003873 if (cmd_len < sizeof(struct l2cap_conn_req))
3874 return -EPROTO;
3875
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303876 hci_dev_lock(hdev);
3877 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3878 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
Alfonso Acosta48ec92f2014-10-07 08:44:10 +00003879 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303880 hci_dev_unlock(hdev);
3881
Gustavo Padovan300229f2012-10-12 19:40:40 +08003882 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003883 return 0;
3884}
3885
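/* Handle a Connection Response or Create Channel Response. Success moves
 * the channel to BT_CONFIG and starts configuration, a pending result
 * just marks the connect as pending, anything else deletes the channel.
 */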
Mat Martineau5909cf32012-10-23 15:24:08 -07003886static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003887 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3888 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889{
3890 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3891 u16 scid, dcid, result, status;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003892 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003893 u8 req[128];
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003894 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003895
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003896 if (cmd_len < sizeof(*rsp))
3897 return -EPROTO;
3898
Linus Torvalds1da177e2005-04-16 15:20:36 -07003899 scid = __le16_to_cpu(rsp->scid);
3900 dcid = __le16_to_cpu(rsp->dcid);
3901 result = __le16_to_cpu(rsp->result);
3902 status = __le16_to_cpu(rsp->status);
3903
Andrei Emeltchenko1b009c92012-02-21 12:54:54 +02003904 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01003905 dcid, scid, result, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003906
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003907 mutex_lock(&conn->chan_lock);
3908
Linus Torvalds1da177e2005-04-16 15:20:36 -07003909 if (scid) {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003910 chan = __l2cap_get_chan_by_scid(conn, scid);
3911 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003912 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003913 goto unlock;
3914 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915 } else {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003916 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3917 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003918 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003919 goto unlock;
3920 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003921 }
3922
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003923 err = 0;
3924
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003925 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003926
Linus Torvalds1da177e2005-04-16 15:20:36 -07003927 switch (result) {
3928 case L2CAP_CR_SUCCESS:
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03003929 l2cap_state_change(chan, BT_CONFIG);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003930 chan->ident = 0;
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003931 chan->dcid = dcid;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003932 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01003933
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003934 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003935 break;
3936
Linus Torvalds1da177e2005-04-16 15:20:36 -07003937 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003938 l2cap_build_conf_req(chan, req), req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003939 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940 break;
3941
3942 case L2CAP_CR_PEND:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003943 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 break;
3945
3946 default:
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003947 l2cap_chan_del(chan, ECONNREFUSED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003948 break;
3949 }
3950
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003951 l2cap_chan_unlock(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003952
3953unlock:
3954 mutex_unlock(&conn->chan_lock);
3955
3956 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003957}
3958
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003959static inline void set_default_fcs(struct l2cap_chan *chan)
Mat Martineau8c462b62010-08-24 15:35:42 -07003960{
3961 /* FCS is enabled only in ERTM or streaming mode, if one or both
3962 * sides request it.
3963 */
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003964 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003965 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003966 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003967 chan->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07003968}
3969
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03003970static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3971 u8 ident, u16 flags)
3972{
3973 struct l2cap_conn *conn = chan->conn;
3974
3975 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3976 flags);
3977
3978 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3979 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3980
3981 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3982 l2cap_build_conf_rsp(chan, data,
3983 L2CAP_CONF_SUCCESS, flags), data);
3984}
3985
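/* Send a Command Reject with reason "invalid CID in request" for the
 * given source and destination CIDs.
 */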
Johan Hedberg662d6522013-10-16 11:20:47 +03003986static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3987 u16 scid, u16 dcid)
3988{
3989 struct l2cap_cmd_rej_cid rej;
3990
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003991 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
Johan Hedberg662d6522013-10-16 11:20:47 +03003992 rej.scid = __cpu_to_le16(scid);
3993 rej.dcid = __cpu_to_le16(dcid);
3994
3995 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3996}
3997
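/* Handle a Configure Request. Options may be split over several requests
 * using the continuation flag, so they are accumulated in conf_req and
 * only parsed and answered once the final fragment arrives. When both
 * directions are configured, ERTM/streaming mode is initialised and the
 * channel becomes ready.
 */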
Gustavo Padovan2d792812012-10-06 10:07:01 +01003998static inline int l2cap_config_req(struct l2cap_conn *conn,
3999 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4000 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004001{
4002 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4003 u16 dcid, flags;
4004 u8 rsp[64];
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004005 struct l2cap_chan *chan;
Mat Martineau3c588192012-04-11 10:48:42 -07004006 int len, err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004008 if (cmd_len < sizeof(*req))
4009 return -EPROTO;
4010
Linus Torvalds1da177e2005-04-16 15:20:36 -07004011 dcid = __le16_to_cpu(req->dcid);
4012 flags = __le16_to_cpu(req->flags);
4013
4014 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4015
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004016 chan = l2cap_get_chan_by_scid(conn, dcid);
Johan Hedberg662d6522013-10-16 11:20:47 +03004017 if (!chan) {
4018 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4019 return 0;
4020 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004021
David S. Miller033b1142011-07-21 13:38:42 -07004022 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
Johan Hedberg662d6522013-10-16 11:20:47 +03004023 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4024 chan->dcid);
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004025 goto unlock;
Gustavo F. Padovandf6bd742010-06-14 02:26:15 -03004026 }
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004027
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004028 /* Reject if config buffer is too small. */
Al Viro88219a02007-07-29 00:17:25 -07004029 len = cmd_len - sizeof(*req);
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004030 if (chan->conf_len + len > sizeof(chan->conf_req)) {
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004031 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004032 l2cap_build_conf_rsp(chan, rsp,
4033 L2CAP_CONF_REJECT, flags), rsp);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004034 goto unlock;
4035 }
4036
4037 /* Store config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004038 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4039 chan->conf_len += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004040
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004041 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042 /* Incomplete config. Send empty response. */
4043 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004044 l2cap_build_conf_rsp(chan, rsp,
4045 L2CAP_CONF_SUCCESS, flags), rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046 goto unlock;
4047 }
4048
4049 /* Complete config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004050 len = l2cap_parse_conf_req(chan, rsp);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004051 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004052 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004053 goto unlock;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004054 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004055
Mat Martineau1500109b2012-10-23 15:24:15 -07004056 chan->ident = cmd->ident;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004057 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004058 chan->num_conf_rsp++;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004059
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004060 /* Reset config buffer. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004061 chan->conf_len = 0;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004062
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004063 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
Marcel Holtmann876d9482007-10-20 13:35:42 +02004064 goto unlock;
4065
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004066 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004067 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004068
Mat Martineau105bdf92012-04-27 16:50:48 -07004069 if (chan->mode == L2CAP_MODE_ERTM ||
4070 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004071 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004072
Mat Martineau3c588192012-04-11 10:48:42 -07004073 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004074 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004075 else
4076 l2cap_chan_ready(chan);
4077
Marcel Holtmann876d9482007-10-20 13:35:42 +02004078 goto unlock;
4079 }
4080
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004081 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004082 u8 buf[64];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004083 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004084 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004085 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086 }
4087
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004088	/* Got Conf Rsp PENDING from the remote side and assume we already
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004089	 * sent Conf Rsp PENDING in the code above */
4090 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03004091 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004092
4093 /* check compatibility */
4094
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004095 /* Send rsp for BR/EDR channel */
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004096 if (!chan->hs_hcon)
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004097 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4098 else
4099 chan->ident = cmd->ident;
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004100 }
4101
Linus Torvalds1da177e2005-04-16 15:20:36 -07004102unlock:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004103 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004104 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105}
4106
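/* Handle a Configure Response. SUCCESS completes our side of the
 * negotiation, PENDING defers it (possibly until an AMP logical link is
 * up), UNACCEPT retries with adjusted options a limited number of times,
 * and any other result disconnects the channel.
 */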
Gustavo Padovan2d792812012-10-06 10:07:01 +01004107static inline int l2cap_config_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004108 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4109 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004110{
4111 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4112 u16 scid, flags, result;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004113 struct l2cap_chan *chan;
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004114 int len = cmd_len - sizeof(*rsp);
Mat Martineau3c588192012-04-11 10:48:42 -07004115 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004117 if (cmd_len < sizeof(*rsp))
4118 return -EPROTO;
4119
Linus Torvalds1da177e2005-04-16 15:20:36 -07004120 scid = __le16_to_cpu(rsp->scid);
4121 flags = __le16_to_cpu(rsp->flags);
4122 result = __le16_to_cpu(rsp->result);
4123
Andrei Emeltchenko61386cb2012-03-12 12:13:07 +02004124 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4125 result, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004126
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004127 chan = l2cap_get_chan_by_scid(conn, scid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004128 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004129 return 0;
4130
4131 switch (result) {
4132 case L2CAP_CONF_SUCCESS:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004133 l2cap_conf_rfc_get(chan, rsp->data, len);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004134 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004135 break;
4136
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004137 case L2CAP_CONF_PENDING:
4138 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4139
4140 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4141 char buf[64];
4142
4143 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004144 buf, &result);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004145 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004146 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004147 goto done;
4148 }
4149
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004150 if (!chan->hs_hcon) {
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004151 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4152 0);
Andrei Emeltchenko5ce66b52012-10-31 15:46:30 +02004153 } else {
4154 if (l2cap_check_efs(chan)) {
4155 amp_create_logical_link(chan);
4156 chan->ident = cmd->ident;
4157 }
4158 }
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004159 }
4160 goto done;
4161
Linus Torvalds1da177e2005-04-16 15:20:36 -07004162 case L2CAP_CONF_UNACCEPT:
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004163 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004164 char req[64];
4165
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004166 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004167 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004168 goto done;
4169 }
4170
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004171 /* throw out any old stored conf requests */
4172 result = L2CAP_CONF_SUCCESS;
Gustavo F. Padovanb4450032011-04-12 18:15:09 -03004173 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004174 req, &result);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004175 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004176 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004177 goto done;
4178 }
4179
4180 l2cap_send_cmd(conn, l2cap_get_ident(conn),
Gustavo Padovan2d792812012-10-06 10:07:01 +01004181 L2CAP_CONF_REQ, len, req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004182 chan->num_conf_req++;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004183 if (result != L2CAP_CONF_SUCCESS)
4184 goto done;
4185 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004186 }
4187
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004188 default:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004189 l2cap_chan_set_err(chan, ECONNRESET);
Andrei Emeltchenko2e0052e2012-02-21 12:54:58 +02004190
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08004191 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004192 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193 goto done;
4194 }
4195
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004196 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004197 goto done;
4198
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004199 set_bit(CONF_INPUT_DONE, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004200
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004201 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004202 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004203
Mat Martineau105bdf92012-04-27 16:50:48 -07004204 if (chan->mode == L2CAP_MODE_ERTM ||
4205 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004206 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004207
Mat Martineau3c588192012-04-11 10:48:42 -07004208 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004209 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004210 else
4211 l2cap_chan_ready(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004212 }
4213
4214done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004215 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004216 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004217}
4218
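/* Handle a Disconnection Request: acknowledge it and tear down the
 * matching channel.
 */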
Gustavo Padovan2d792812012-10-06 10:07:01 +01004219static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004220 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4221 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222{
4223 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4224 struct l2cap_disconn_rsp rsp;
4225 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004226 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004228 if (cmd_len != sizeof(*req))
4229 return -EPROTO;
4230
Linus Torvalds1da177e2005-04-16 15:20:36 -07004231 scid = __le16_to_cpu(req->scid);
4232 dcid = __le16_to_cpu(req->dcid);
4233
4234 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4235
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004236 mutex_lock(&conn->chan_lock);
4237
4238 chan = __l2cap_get_chan_by_scid(conn, dcid);
4239 if (!chan) {
4240 mutex_unlock(&conn->chan_lock);
Johan Hedberg662d6522013-10-16 11:20:47 +03004241 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4242 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004243 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004244
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004245 l2cap_chan_lock(chan);
4246
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03004247 rsp.dcid = cpu_to_le16(chan->scid);
4248 rsp.scid = cpu_to_le16(chan->dcid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004249 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4250
Gustavo Padovan5ec1bbe2013-10-15 19:24:48 -03004251 chan->ops->set_shutdown(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004252
Mat Martineau61d6ef32012-04-27 16:50:50 -07004253 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004254 l2cap_chan_del(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004255
4256 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004257
Gustavo Padovan80b98022012-05-27 22:27:51 -03004258 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004259 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004260
4261 mutex_unlock(&conn->chan_lock);
4262
Linus Torvalds1da177e2005-04-16 15:20:36 -07004263 return 0;
4264}
4265
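/* Handle a Disconnection Response: the remote side has confirmed our
 * request, so the channel can be removed.
 */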
Gustavo Padovan2d792812012-10-06 10:07:01 +01004266static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004267 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4268 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004269{
4270 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4271 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004272 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004274 if (cmd_len != sizeof(*rsp))
4275 return -EPROTO;
4276
Linus Torvalds1da177e2005-04-16 15:20:36 -07004277 scid = __le16_to_cpu(rsp->scid);
4278 dcid = __le16_to_cpu(rsp->dcid);
4279
4280 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4281
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004282 mutex_lock(&conn->chan_lock);
4283
4284 chan = __l2cap_get_chan_by_scid(conn, scid);
4285 if (!chan) {
4286 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004287 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004288 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004289
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004290 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004291
Mat Martineau61d6ef32012-04-27 16:50:50 -07004292 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004293 l2cap_chan_del(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004294
4295 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004296
Gustavo Padovan80b98022012-05-27 22:27:51 -03004297 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004298 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004299
4300 mutex_unlock(&conn->chan_lock);
4301
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302 return 0;
4303}
4304
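/* Handle an Information Request. Feature mask and fixed channel queries
 * are answered from local capabilities; any other type is reported as
 * not supported.
 */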
Gustavo Padovan2d792812012-10-06 10:07:01 +01004305static inline int l2cap_information_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004306 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4307 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004308{
4309 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004310 u16 type;
4311
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004312 if (cmd_len != sizeof(*req))
4313 return -EPROTO;
4314
Linus Torvalds1da177e2005-04-16 15:20:36 -07004315 type = __le16_to_cpu(req->type);
4316
4317 BT_DBG("type 0x%4.4x", type);
4318
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004319 if (type == L2CAP_IT_FEAT_MASK) {
4320 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004321 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004322 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004323 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4324 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004325 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004326 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Gustavo Padovan2d792812012-10-06 10:07:01 +01004327 | L2CAP_FEAT_FCS;
Marcel Holtmann848566b2013-10-01 22:59:22 -07004328 if (conn->hs_enabled)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03004329 feat_mask |= L2CAP_FEAT_EXT_FLOW
Gustavo Padovan2d792812012-10-06 10:07:01 +01004330 | L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenkoa5fd6f32011-09-16 16:26:32 +03004331
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004332 put_unaligned_le32(feat_mask, rsp->data);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004333 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4334 buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004335 } else if (type == L2CAP_IT_FIXED_CHAN) {
4336 u8 buf[12];
4337 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Mat Martineau50a147c2011-11-02 16:18:34 -07004338
Marcel Holtmann848566b2013-10-01 22:59:22 -07004339 if (conn->hs_enabled)
Mat Martineau50a147c2011-11-02 16:18:34 -07004340 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4341 else
4342 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4343
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004344 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4345 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Andrei Emeltchenkoc6337ea2011-10-20 17:02:44 +03004346 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
Gustavo Padovan2d792812012-10-06 10:07:01 +01004347 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4348 buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004349 } else {
4350 struct l2cap_info_rsp rsp;
4351 rsp.type = cpu_to_le16(type);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004352 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004353 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4354 &rsp);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004355 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004356
4357 return 0;
4358}
4359
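/* Handle an Information Response to our own feature mask or fixed
 * channel query and start any channels that were waiting for it.
 */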
Gustavo Padovan2d792812012-10-06 10:07:01 +01004360static inline int l2cap_information_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004361 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4362 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004363{
4364 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4365 u16 type, result;
4366
Jaganath Kanakkassery3f6fa3d2013-06-21 19:55:11 +05304367 if (cmd_len < sizeof(*rsp))
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004368 return -EPROTO;
4369
Linus Torvalds1da177e2005-04-16 15:20:36 -07004370 type = __le16_to_cpu(rsp->type);
4371 result = __le16_to_cpu(rsp->result);
4372
4373 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4374
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004375	/* L2CAP Info req/rsp are not bound to channels, so add extra checks */
4376 if (cmd->ident != conn->info_ident ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01004377 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004378 return 0;
4379
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02004380 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004381
Ville Tervoadb08ed2010-08-04 09:43:33 +03004382 if (result != L2CAP_IR_SUCCESS) {
4383 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4384 conn->info_ident = 0;
4385
4386 l2cap_conn_start(conn);
4387
4388 return 0;
4389 }
4390
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004391 switch (type) {
4392 case L2CAP_IT_FEAT_MASK:
Harvey Harrison83985312008-05-02 16:25:46 -07004393 conn->feat_mask = get_unaligned_le32(rsp->data);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004394
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07004395 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004396 struct l2cap_info_req req;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004397 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004398
4399 conn->info_ident = l2cap_get_ident(conn);
4400
4401 l2cap_send_cmd(conn, conn->info_ident,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004402 L2CAP_INFO_REQ, sizeof(req), &req);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004403 } else {
4404 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4405 conn->info_ident = 0;
4406
4407 l2cap_conn_start(conn);
4408 }
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004409 break;
4410
4411 case L2CAP_IT_FIXED_CHAN:
4412 conn->fixed_chan_mask = rsp->data[0];
Marcel Holtmann984947d2009-02-06 23:35:19 +01004413 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004414 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004415
4416 l2cap_conn_start(conn);
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004417 break;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004418 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004419
Linus Torvalds1da177e2005-04-16 15:20:36 -07004420 return 0;
4421}
4422
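/* Handle a Create Channel Request. Controller id 0 falls back to a plain
 * BR/EDR connection; otherwise the AMP controller id is validated and
 * the new channel is tied to the corresponding high-speed link.
 */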
Mat Martineau17009152012-10-23 15:24:07 -07004423static int l2cap_create_channel_req(struct l2cap_conn *conn,
4424 struct l2cap_cmd_hdr *cmd,
4425 u16 cmd_len, void *data)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004426{
4427 struct l2cap_create_chan_req *req = data;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004428 struct l2cap_create_chan_rsp rsp;
Mat Martineau17009152012-10-23 15:24:07 -07004429 struct l2cap_chan *chan;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004430 struct hci_dev *hdev;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004431 u16 psm, scid;
4432
4433 if (cmd_len != sizeof(*req))
4434 return -EPROTO;
4435
Marcel Holtmann848566b2013-10-01 22:59:22 -07004436 if (!conn->hs_enabled)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004437 return -EINVAL;
4438
4439 psm = le16_to_cpu(req->psm);
4440 scid = le16_to_cpu(req->scid);
4441
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004442 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004443
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004444 /* For controller id 0 make BR/EDR connection */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004445 if (req->amp_id == AMP_ID_BREDR) {
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004446 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4447 req->amp_id);
4448 return 0;
4449 }
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004450
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004451 /* Validate AMP controller id */
4452 hdev = hci_dev_get(req->amp_id);
4453 if (!hdev)
4454 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004455
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004456 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
Mat Martineau17009152012-10-23 15:24:07 -07004457 hci_dev_put(hdev);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004458 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004459 }
4460
4461 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4462 req->amp_id);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004463 if (chan) {
4464 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4465 struct hci_conn *hs_hcon;
4466
Marcel Holtmann98e0f7e2013-10-13 02:23:40 -07004467 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4468 &conn->hcon->dst);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004469 if (!hs_hcon) {
4470 hci_dev_put(hdev);
Johan Hedberg662d6522013-10-16 11:20:47 +03004471 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4472 chan->dcid);
4473 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004474 }
4475
4476 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4477
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004478 mgr->bredr_chan = chan;
4479 chan->hs_hcon = hs_hcon;
Andrei Emeltchenkofd45bf42012-11-20 17:16:22 +02004480 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004481 conn->mtu = hdev->block_mtu;
4482 }
4483
4484 hci_dev_put(hdev);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004485
4486 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004487
4488error:
4489 rsp.dcid = 0;
4490 rsp.scid = cpu_to_le16(scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004491 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4492 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004493
4494 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4495 sizeof(rsp), &rsp);
4496
Johan Hedbergdc280802013-09-16 13:05:13 +03004497 return 0;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004498}
4499
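/* Helpers for sending the AMP channel move signalling PDUs: Move Channel
 * Request, Response, Confirm and Confirm Response.
 */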
Mat Martineau8eb200b2012-10-23 15:24:17 -07004500static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4501{
4502 struct l2cap_move_chan_req req;
4503 u8 ident;
4504
4505 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4506
4507 ident = l2cap_get_ident(chan->conn);
4508 chan->ident = ident;
4509
4510 req.icid = cpu_to_le16(chan->scid);
4511 req.dest_amp_id = dest_amp_id;
4512
4513 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4514 &req);
4515
4516 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4517}
4518
Mat Martineau1500109b2012-10-23 15:24:15 -07004519static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004520{
4521 struct l2cap_move_chan_rsp rsp;
4522
Mat Martineau1500109b2012-10-23 15:24:15 -07004523 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004524
Mat Martineau1500109b2012-10-23 15:24:15 -07004525 rsp.icid = cpu_to_le16(chan->dcid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004526 rsp.result = cpu_to_le16(result);
4527
Mat Martineau1500109b2012-10-23 15:24:15 -07004528 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4529 sizeof(rsp), &rsp);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004530}
4531
Mat Martineau5b155ef2012-10-23 15:24:14 -07004532static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004533{
4534 struct l2cap_move_chan_cfm cfm;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004535
Mat Martineau5b155ef2012-10-23 15:24:14 -07004536 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004537
Mat Martineau5b155ef2012-10-23 15:24:14 -07004538 chan->ident = l2cap_get_ident(chan->conn);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004539
Mat Martineau5b155ef2012-10-23 15:24:14 -07004540 cfm.icid = cpu_to_le16(chan->scid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004541 cfm.result = cpu_to_le16(result);
4542
Mat Martineau5b155ef2012-10-23 15:24:14 -07004543 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4544 sizeof(cfm), &cfm);
4545
4546 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4547}
4548
4549static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4550{
4551 struct l2cap_move_chan_cfm cfm;
4552
4553 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4554
4555 cfm.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004556 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
Mat Martineau5b155ef2012-10-23 15:24:14 -07004557
4558 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4559 sizeof(cfm), &cfm);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004560}
4561
4562static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004563 u16 icid)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004564{
4565 struct l2cap_move_chan_cfm_rsp rsp;
4566
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004567 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004568
4569 rsp.icid = cpu_to_le16(icid);
4570 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4571}
4572
Mat Martineau5f3847a2012-10-23 15:24:12 -07004573static void __release_logical_link(struct l2cap_chan *chan)
4574{
4575 chan->hs_hchan = NULL;
4576 chan->hs_hcon = NULL;
4577
4578 /* Placeholder - release the logical link */
4579}
4580
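/* Logical link setup failed: abort channel creation, or unwind an
 * in-progress channel move depending on the move role and state.
 */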
Mat Martineau1500109b2012-10-23 15:24:15 -07004581static void l2cap_logical_fail(struct l2cap_chan *chan)
4582{
4583 /* Logical link setup failed */
4584 if (chan->state != BT_CONNECTED) {
4585 /* Create channel failure, disconnect */
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004586 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineau1500109b2012-10-23 15:24:15 -07004587 return;
4588 }
4589
4590 switch (chan->move_role) {
4591 case L2CAP_MOVE_ROLE_RESPONDER:
4592 l2cap_move_done(chan);
4593 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4594 break;
4595 case L2CAP_MOVE_ROLE_INITIATOR:
4596 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4597 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4598 /* Remote has only sent pending or
4599 * success responses, clean up
4600 */
4601 l2cap_move_done(chan);
4602 }
4603
4604		/* Other AMP move states imply that the move
4605		 * has already been aborted
4606 */
4607 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4608 break;
4609 }
4610}
4611
4612static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4613 struct hci_chan *hchan)
4614{
4615 struct l2cap_conf_rsp rsp;
Mat Martineau1500109b2012-10-23 15:24:15 -07004616
Andrei Emeltchenko336178a2012-10-31 15:46:27 +02004617 chan->hs_hchan = hchan;
Mat Martineau1500109b2012-10-23 15:24:15 -07004618 chan->hs_hcon->l2cap_data = chan->conn;
4619
Andrei Emeltchenko35ba9562012-10-25 15:20:43 +03004620 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
Mat Martineau1500109b2012-10-23 15:24:15 -07004621
4622 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Andrei Emeltchenkofe79c6f2012-10-25 15:20:42 +03004623 int err;
Mat Martineau1500109b2012-10-23 15:24:15 -07004624
4625 set_default_fcs(chan);
4626
4627 err = l2cap_ertm_init(chan);
4628 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004629 l2cap_send_disconn_req(chan, -err);
Mat Martineau1500109b2012-10-23 15:24:15 -07004630 else
4631 l2cap_chan_ready(chan);
4632 }
4633}
4634
4635static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4636 struct hci_chan *hchan)
4637{
4638 chan->hs_hcon = hchan->conn;
4639 chan->hs_hcon->l2cap_data = chan->conn;
4640
4641 BT_DBG("move_state %d", chan->move_state);
4642
4643 switch (chan->move_state) {
4644 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4645 /* Move confirm will be sent after a success
4646 * response is received
4647 */
4648 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4649 break;
4650 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4651 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4652 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4653 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4654 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4655 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4656 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4657 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4658 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4659 }
4660 break;
4661 default:
4662 /* Move was not in expected state, free the channel */
4663 __release_logical_link(chan);
4664
4665 chan->move_state = L2CAP_MOVE_STABLE;
4666 }
4667}
4668
4669/* Call with chan locked */
Andrei Emeltchenko27695fb2012-10-25 15:20:45 +03004670void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4671 u8 status)
Mat Martineau5b155ef2012-10-23 15:24:14 -07004672{
Mat Martineau1500109b2012-10-23 15:24:15 -07004673 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4674
4675 if (status) {
4676 l2cap_logical_fail(chan);
4677 __release_logical_link(chan);
4678 return;
4679 }
4680
4681 if (chan->state != BT_CONNECTED) {
4682 /* Ignore logical link if channel is on BR/EDR */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004683 if (chan->local_amp_id != AMP_ID_BREDR)
Mat Martineau1500109b2012-10-23 15:24:15 -07004684 l2cap_logical_finish_create(chan, hchan);
4685 } else {
4686 l2cap_logical_finish_move(chan, hchan);
4687 }
Mat Martineau5b155ef2012-10-23 15:24:14 -07004688}
4689
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004690void l2cap_move_start(struct l2cap_chan *chan)
4691{
4692 BT_DBG("chan %p", chan);
4693
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004694 if (chan->local_amp_id == AMP_ID_BREDR) {
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004695 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4696 return;
4697 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4698 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4699 /* Placeholder - start physical link setup */
4700 } else {
4701 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4702 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4703 chan->move_id = 0;
4704 l2cap_move_setup(chan);
4705 l2cap_send_move_chan_req(chan, 0);
4706 }
4707}
4708
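/* Physical link setup finished for a channel being created over AMP:
 * either continue with a Create Channel Request (outgoing channel) or
 * answer the pending request (incoming channel).
 */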
Mat Martineau8eb200b2012-10-23 15:24:17 -07004709static void l2cap_do_create(struct l2cap_chan *chan, int result,
4710 u8 local_amp_id, u8 remote_amp_id)
4711{
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004712 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4713 local_amp_id, remote_amp_id);
4714
Andrei Emeltchenko12d6cc62012-11-14 17:39:31 +02004715 chan->fcs = L2CAP_FCS_NONE;
4716
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004717 /* Outgoing channel on AMP */
4718 if (chan->state == BT_CONNECT) {
4719 if (result == L2CAP_CR_SUCCESS) {
4720 chan->local_amp_id = local_amp_id;
4721 l2cap_send_create_chan_req(chan, remote_amp_id);
4722 } else {
4723 /* Revert to BR/EDR connect */
4724 l2cap_send_conn_req(chan);
4725 }
4726
4727 return;
4728 }
4729
4730 /* Incoming channel on AMP */
4731 if (__l2cap_no_conn_pending(chan)) {
Mat Martineau8eb200b2012-10-23 15:24:17 -07004732 struct l2cap_conn_rsp rsp;
4733 char buf[128];
4734 rsp.scid = cpu_to_le16(chan->dcid);
4735 rsp.dcid = cpu_to_le16(chan->scid);
4736
Mat Martineau8eb200b2012-10-23 15:24:17 -07004737 if (result == L2CAP_CR_SUCCESS) {
4738 /* Send successful response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004739 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4740 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004741 } else {
4742 /* Send negative response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004743 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4744 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004745 }
4746
4747 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4748 sizeof(rsp), &rsp);
4749
4750 if (result == L2CAP_CR_SUCCESS) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02004751 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004752 set_bit(CONF_REQ_SENT, &chan->conf_state);
4753 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4754 L2CAP_CONF_REQ,
4755 l2cap_build_conf_req(chan, buf), buf);
4756 chan->num_conf_req++;
4757 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004758 }
4759}
4760
4761static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4762 u8 remote_amp_id)
4763{
4764 l2cap_move_setup(chan);
4765 chan->move_id = local_amp_id;
4766 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4767
4768 l2cap_send_move_chan_req(chan, remote_amp_id);
4769}
4770
4771static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4772{
4773 struct hci_chan *hchan = NULL;
4774
4775 /* Placeholder - get hci_chan for logical link */
4776
4777 if (hchan) {
4778 if (hchan->state == BT_CONNECTED) {
4779 /* Logical link is ready to go */
4780 chan->hs_hcon = hchan->conn;
4781 chan->hs_hcon->l2cap_data = chan->conn;
4782 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4783 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4784
4785 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4786 } else {
4787 /* Wait for logical link to be ready */
4788 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4789 }
4790 } else {
4791 /* Logical link not available */
4792 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4793 }
4794}
4795
4796static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4797{
4798 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4799 u8 rsp_result;
4800 if (result == -EINVAL)
4801 rsp_result = L2CAP_MR_BAD_ID;
4802 else
4803 rsp_result = L2CAP_MR_NOT_ALLOWED;
4804
4805 l2cap_send_move_chan_rsp(chan, rsp_result);
4806 }
4807
4808 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4809 chan->move_state = L2CAP_MOVE_STABLE;
4810
4811 /* Restart data transmission */
4812 l2cap_ertm_send(chan);
4813}
4814
Andrei Emeltchenkoa514b172012-11-14 17:39:30 +02004815/* Invoke with locked chan */
4816void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
Mat Martineau8eb200b2012-10-23 15:24:17 -07004817{
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004818 u8 local_amp_id = chan->local_amp_id;
Andrei Emeltchenkofffadc02012-11-01 15:37:03 +02004819 u8 remote_amp_id = chan->remote_amp_id;
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004820
Mat Martineau8eb200b2012-10-23 15:24:17 -07004821 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4822 chan, result, local_amp_id, remote_amp_id);
4823
Mat Martineau8eb200b2012-10-23 15:24:17 -07004824 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4825 l2cap_chan_unlock(chan);
4826 return;
4827 }
4828
4829 if (chan->state != BT_CONNECTED) {
4830 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4831 } else if (result != L2CAP_MR_SUCCESS) {
4832 l2cap_do_move_cancel(chan, result);
4833 } else {
4834 switch (chan->move_role) {
4835 case L2CAP_MOVE_ROLE_INITIATOR:
4836 l2cap_do_move_initiate(chan, local_amp_id,
4837 remote_amp_id);
4838 break;
4839 case L2CAP_MOVE_ROLE_RESPONDER:
4840 l2cap_do_move_respond(chan, result);
4841 break;
4842 default:
4843 l2cap_do_move_cancel(chan, result);
4844 break;
4845 }
4846 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004847}
4848
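/* Handle a Move Channel Request: validate the destination controller,
 * detect move collisions and either accept the move (possibly pending on
 * the physical link) or reject it.
 */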
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004849static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004850 struct l2cap_cmd_hdr *cmd,
4851 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004852{
4853 struct l2cap_move_chan_req *req = data;
Mat Martineau1500109b2012-10-23 15:24:15 -07004854 struct l2cap_move_chan_rsp rsp;
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004855 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004856 u16 icid = 0;
4857 u16 result = L2CAP_MR_NOT_ALLOWED;
4858
4859 if (cmd_len != sizeof(*req))
4860 return -EPROTO;
4861
4862 icid = le16_to_cpu(req->icid);
4863
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004864 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004865
Marcel Holtmann848566b2013-10-01 22:59:22 -07004866 if (!conn->hs_enabled)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004867 return -EINVAL;
4868
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004869 chan = l2cap_get_chan_by_dcid(conn, icid);
4870 if (!chan) {
Mat Martineau1500109b2012-10-23 15:24:15 -07004871 rsp.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004872 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
Mat Martineau1500109b2012-10-23 15:24:15 -07004873 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4874 sizeof(rsp), &rsp);
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004875 return 0;
4876 }
4877
Mat Martineau1500109b2012-10-23 15:24:15 -07004878 chan->ident = cmd->ident;
4879
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004880 if (chan->scid < L2CAP_CID_DYN_START ||
4881 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4882 (chan->mode != L2CAP_MODE_ERTM &&
4883 chan->mode != L2CAP_MODE_STREAMING)) {
4884 result = L2CAP_MR_NOT_ALLOWED;
4885 goto send_move_response;
4886 }
4887
4888 if (chan->local_amp_id == req->dest_amp_id) {
4889 result = L2CAP_MR_SAME_ID;
4890 goto send_move_response;
4891 }
4892
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004893 if (req->dest_amp_id != AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004894 struct hci_dev *hdev;
4895 hdev = hci_dev_get(req->dest_amp_id);
4896 if (!hdev || hdev->dev_type != HCI_AMP ||
4897 !test_bit(HCI_UP, &hdev->flags)) {
4898 if (hdev)
4899 hci_dev_put(hdev);
4900
4901 result = L2CAP_MR_BAD_ID;
4902 goto send_move_response;
4903 }
4904 hci_dev_put(hdev);
4905 }
4906
4907 /* Detect a move collision. Only send a collision response
4908 * if this side has "lost", otherwise proceed with the move.
4909 * The winner has the larger bd_addr.
4910 */
4911 if ((__chan_is_moving(chan) ||
4912 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
Marcel Holtmann6f59b902013-10-13 05:24:01 -07004913 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004914 result = L2CAP_MR_COLLISION;
4915 goto send_move_response;
4916 }
4917
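	/* Collision check passed (or no collision): take the responder
	 * role and prepare the channel state for the move before replying.
	 */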
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004918 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4919 l2cap_move_setup(chan);
4920 chan->move_id = req->dest_amp_id;
4921 icid = chan->dcid;
4922
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004923 if (req->dest_amp_id == AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004924 /* Moving to BR/EDR */
4925 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4926 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4927 result = L2CAP_MR_PEND;
4928 } else {
4929 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4930 result = L2CAP_MR_SUCCESS;
4931 }
4932 } else {
4933 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4934 /* Placeholder - uncomment when amp functions are available */
4935 /*amp_accept_physical(chan, req->dest_amp_id);*/
4936 result = L2CAP_MR_PEND;
4937 }
4938
4939send_move_response:
Mat Martineau1500109b2012-10-23 15:24:15 -07004940 l2cap_send_move_chan_rsp(chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004941
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004942 l2cap_chan_unlock(chan);
4943
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004944 return 0;
4945}
4946
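/* Continue an outgoing channel move after a successful or pending Move
 * Channel Response.  Depending on the current move state this either
 * waits for the logical link to complete, sends the Move Channel Confirm
 * right away, or aborts the move and confirms the original controller.
 */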
Mat Martineau5b155ef2012-10-23 15:24:14 -07004947static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4948{
4949 struct l2cap_chan *chan;
4950 struct hci_chan *hchan = NULL;
4951
4952 chan = l2cap_get_chan_by_scid(conn, icid);
4953 if (!chan) {
4954 l2cap_send_move_chan_cfm_icid(conn, icid);
4955 return;
4956 }
4957
4958 __clear_chan_timer(chan);
4959 if (result == L2CAP_MR_PEND)
4960 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4961
4962 switch (chan->move_state) {
4963 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4964 /* Move confirm will be sent when logical link
4965 * is complete.
4966 */
4967 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4968 break;
4969 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4970 if (result == L2CAP_MR_PEND) {
4971 break;
4972 } else if (test_bit(CONN_LOCAL_BUSY,
4973 &chan->conn_state)) {
4974 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4975 } else {
4976 /* Logical link is up or moving to BR/EDR,
4977 * proceed with move
4978 */
4979 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4980 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4981 }
4982 break;
4983 case L2CAP_MOVE_WAIT_RSP:
4984 /* Moving to AMP */
4985 if (result == L2CAP_MR_SUCCESS) {
4986 /* Remote is ready, send confirm immediately
4987 * after logical link is ready
4988 */
4989 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4990 } else {
4991 /* Both logical link and move success
4992 * are required to confirm
4993 */
4994 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4995 }
4996
4997 /* Placeholder - get hci_chan for logical link */
4998 if (!hchan) {
4999 /* Logical link not available */
5000 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5001 break;
5002 }
5003
5004 /* If the logical link is not yet connected, do not
5005 * send confirmation.
5006 */
5007 if (hchan->state != BT_CONNECTED)
5008 break;
5009
5010 /* Logical link is already ready to go */
5011
5012 chan->hs_hcon = hchan->conn;
5013 chan->hs_hcon->l2cap_data = chan->conn;
5014
5015 if (result == L2CAP_MR_SUCCESS) {
5016 /* Can confirm now */
5017 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5018 } else {
5019 /* Now only need move success
5020 * to confirm
5021 */
5022 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5023 }
5024
5025 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5026 break;
5027 default:
5028 /* Any other amp move state means the move failed. */
5029 chan->move_id = chan->local_amp_id;
5030 l2cap_move_done(chan);
5031 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5032 }
5033
5034 l2cap_chan_unlock(chan);
5035}
5036
5037static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5038 u16 result)
5039{
5040 struct l2cap_chan *chan;
5041
5042 chan = l2cap_get_chan_by_ident(conn, ident);
5043 if (!chan) {
5044 /* Could not locate channel, icid is best guess */
5045 l2cap_send_move_chan_cfm_icid(conn, icid);
5046 return;
5047 }
5048
5049 __clear_chan_timer(chan);
5050
5051 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5052 if (result == L2CAP_MR_COLLISION) {
5053 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5054 } else {
5055 /* Cleanup - cancel move */
5056 chan->move_id = chan->local_amp_id;
5057 l2cap_move_done(chan);
5058 }
5059 }
5060
5061 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5062
5063 l2cap_chan_unlock(chan);
5064}
5065
5066static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5067 struct l2cap_cmd_hdr *cmd,
5068 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005069{
5070 struct l2cap_move_chan_rsp *rsp = data;
5071 u16 icid, result;
5072
5073 if (cmd_len != sizeof(*rsp))
5074 return -EPROTO;
5075
5076 icid = le16_to_cpu(rsp->icid);
5077 result = le16_to_cpu(rsp->result);
5078
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005079 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005080
Mat Martineau5b155ef2012-10-23 15:24:14 -07005081 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5082 l2cap_move_continue(conn, icid, result);
5083 else
5084 l2cap_move_fail(conn, cmd->ident, icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005085
5086 return 0;
5087}
5088
Mat Martineau5f3847a2012-10-23 15:24:12 -07005089static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5090 struct l2cap_cmd_hdr *cmd,
5091 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005092{
5093 struct l2cap_move_chan_cfm *cfm = data;
Mat Martineau5f3847a2012-10-23 15:24:12 -07005094 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005095 u16 icid, result;
5096
5097 if (cmd_len != sizeof(*cfm))
5098 return -EPROTO;
5099
5100 icid = le16_to_cpu(cfm->icid);
5101 result = le16_to_cpu(cfm->result);
5102
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005103 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005104
Mat Martineau5f3847a2012-10-23 15:24:12 -07005105 chan = l2cap_get_chan_by_dcid(conn, icid);
5106 if (!chan) {
5107 /* Spec requires a response even if the icid was not found */
5108 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5109 return 0;
5110 }
5111
5112 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5113 if (result == L2CAP_MC_CONFIRMED) {
5114 chan->local_amp_id = chan->move_id;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005115 if (chan->local_amp_id == AMP_ID_BREDR)
Mat Martineau5f3847a2012-10-23 15:24:12 -07005116 __release_logical_link(chan);
5117 } else {
5118 chan->move_id = chan->local_amp_id;
5119 }
5120
5121 l2cap_move_done(chan);
5122 }
5123
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005124 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5125
Mat Martineau5f3847a2012-10-23 15:24:12 -07005126 l2cap_chan_unlock(chan);
5127
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005128 return 0;
5129}
5130
5131static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005132 struct l2cap_cmd_hdr *cmd,
5133 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005134{
5135 struct l2cap_move_chan_cfm_rsp *rsp = data;
Mat Martineau3fd71a02012-10-23 15:24:16 -07005136 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005137 u16 icid;
5138
5139 if (cmd_len != sizeof(*rsp))
5140 return -EPROTO;
5141
5142 icid = le16_to_cpu(rsp->icid);
5143
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005144 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005145
Mat Martineau3fd71a02012-10-23 15:24:16 -07005146 chan = l2cap_get_chan_by_scid(conn, icid);
5147 if (!chan)
5148 return 0;
5149
5150 __clear_chan_timer(chan);
5151
5152 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5153 chan->local_amp_id = chan->move_id;
5154
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005155 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
Mat Martineau3fd71a02012-10-23 15:24:16 -07005156 __release_logical_link(chan);
5157
5158 l2cap_move_done(chan);
5159 }
5160
5161 l2cap_chan_unlock(chan);
5162
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005163 return 0;
5164}
5165
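/* LE Connection Parameter Update Request.  Only the master of the
 * connection may receive this command; the requested interval, latency
 * and supervision timeout are validated before being applied, and the
 * outcome is reported back in a Connection Parameter Update Response.
 * Accepted parameters are also passed to the controller and to mgmt so
 * that user space can store them.
 */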
Claudio Takahaside731152011-02-11 19:28:55 -02005166static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005167 struct l2cap_cmd_hdr *cmd,
Johan Hedberg203e6392013-05-15 10:07:15 +03005168 u16 cmd_len, u8 *data)
Claudio Takahaside731152011-02-11 19:28:55 -02005169{
5170 struct hci_conn *hcon = conn->hcon;
5171 struct l2cap_conn_param_update_req *req;
5172 struct l2cap_conn_param_update_rsp rsp;
Johan Hedberg203e6392013-05-15 10:07:15 +03005173 u16 min, max, latency, to_multiplier;
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005174 int err;
Claudio Takahaside731152011-02-11 19:28:55 -02005175
Johan Hedberg40bef302014-07-16 11:42:27 +03005176 if (hcon->role != HCI_ROLE_MASTER)
Claudio Takahaside731152011-02-11 19:28:55 -02005177 return -EINVAL;
5178
Claudio Takahaside731152011-02-11 19:28:55 -02005179 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5180 return -EPROTO;
5181
5182 req = (struct l2cap_conn_param_update_req *) data;
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005183 min = __le16_to_cpu(req->min);
5184 max = __le16_to_cpu(req->max);
Claudio Takahaside731152011-02-11 19:28:55 -02005185 latency = __le16_to_cpu(req->latency);
5186 to_multiplier = __le16_to_cpu(req->to_multiplier);
5187
5188 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01005189 min, max, latency, to_multiplier);
Claudio Takahaside731152011-02-11 19:28:55 -02005190
5191 memset(&rsp, 0, sizeof(rsp));
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005192
Andre Guedesd4905f22014-06-25 21:52:52 -03005193 err = hci_check_conn_params(min, max, latency, to_multiplier);
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005194 if (err)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005195 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005196 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005197 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005198
5199 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005200 sizeof(rsp), &rsp);
Claudio Takahaside731152011-02-11 19:28:55 -02005201
Andre Guedesffb5a8272014-07-01 18:10:11 -03005202 if (!err) {
Johan Hedbergf4869e22014-07-02 17:37:32 +03005203 u8 store_hint;
Andre Guedesffb5a8272014-07-01 18:10:11 -03005204
Johan Hedbergf4869e22014-07-02 17:37:32 +03005205 store_hint = hci_le_conn_update(hcon, min, max, latency,
5206 to_multiplier);
5207 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5208 store_hint, min, max, latency,
5209 to_multiplier);
5210
Andre Guedesffb5a8272014-07-01 18:10:11 -03005211 }
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005212
Claudio Takahaside731152011-02-11 19:28:55 -02005213 return 0;
5214}
5215
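/* LE Credit Based Connection Response.  On success the channel is
 * updated with the remote DCID, MTU, MPS and initial TX credits and
 * moved to the ready state; any other result tears the channel down with
 * ECONNREFUSED.  A successful response advertising an MTU or MPS below
 * 23 octets is rejected as a protocol error.
 */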
Johan Hedbergf1496de2013-05-13 14:15:56 +03005216static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5217 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5218 u8 *data)
5219{
5220 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5221 u16 dcid, mtu, mps, credits, result;
5222 struct l2cap_chan *chan;
5223 int err;
5224
5225 if (cmd_len < sizeof(*rsp))
5226 return -EPROTO;
5227
5228 dcid = __le16_to_cpu(rsp->dcid);
5229 mtu = __le16_to_cpu(rsp->mtu);
5230 mps = __le16_to_cpu(rsp->mps);
5231 credits = __le16_to_cpu(rsp->credits);
5232 result = __le16_to_cpu(rsp->result);
5233
5234 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5235 return -EPROTO;
5236
5237 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5238 dcid, mtu, mps, credits, result);
5239
5240 mutex_lock(&conn->chan_lock);
5241
5242 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5243 if (!chan) {
5244 err = -EBADSLT;
5245 goto unlock;
5246 }
5247
5248 err = 0;
5249
5250 l2cap_chan_lock(chan);
5251
5252 switch (result) {
5253 case L2CAP_CR_SUCCESS:
5254 chan->ident = 0;
5255 chan->dcid = dcid;
5256 chan->omtu = mtu;
5257 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005258 chan->tx_credits = credits;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005259 l2cap_chan_ready(chan);
5260 break;
5261
5262 default:
5263 l2cap_chan_del(chan, ECONNREFUSED);
5264 break;
5265 }
5266
5267 l2cap_chan_unlock(chan);
5268
5269unlock:
5270 mutex_unlock(&conn->chan_lock);
5271
5272 return err;
5273}
5274
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005275static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005276 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5277 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005278{
5279 int err = 0;
5280
5281 switch (cmd->code) {
5282 case L2CAP_COMMAND_REJ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005283 l2cap_command_rej(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005284 break;
5285
5286 case L2CAP_CONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005287 err = l2cap_connect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005288 break;
5289
5290 case L2CAP_CONN_RSP:
Mat Martineauf5a25982012-10-11 17:48:21 +03005291 case L2CAP_CREATE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005292 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005293 break;
5294
5295 case L2CAP_CONF_REQ:
5296 err = l2cap_config_req(conn, cmd, cmd_len, data);
5297 break;
5298
5299 case L2CAP_CONF_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005300 l2cap_config_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005301 break;
5302
5303 case L2CAP_DISCONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005304 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005305 break;
5306
5307 case L2CAP_DISCONN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005308 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005309 break;
5310
5311 case L2CAP_ECHO_REQ:
5312 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5313 break;
5314
5315 case L2CAP_ECHO_RSP:
5316 break;
5317
5318 case L2CAP_INFO_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005319 err = l2cap_information_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005320 break;
5321
5322 case L2CAP_INFO_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005323 l2cap_information_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005324 break;
5325
Mat Martineauf94ff6f2011-11-02 16:18:32 -07005326 case L2CAP_CREATE_CHAN_REQ:
5327 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5328 break;
5329
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005330 case L2CAP_MOVE_CHAN_REQ:
5331 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5332 break;
5333
5334 case L2CAP_MOVE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005335 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005336 break;
5337
5338 case L2CAP_MOVE_CHAN_CFM:
5339 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5340 break;
5341
5342 case L2CAP_MOVE_CHAN_CFM_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005343 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005344 break;
5345
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005346 default:
5347 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5348 err = -EINVAL;
5349 break;
5350 }
5351
5352 return err;
5353}
5354
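/* LE Credit Based Connection Request.  The PDU carries the target PSM,
 * the remote SCID and the remote MTU, MPS and initial credit count.  A
 * new channel is created from the listening channel for that PSM
 * (subject to the listener's security level), LE flow control state is
 * initialised, and an LE Credit Based Connection Response is sent unless
 * the connection is deferred to user space.
 */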
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005355static int l2cap_le_connect_req(struct l2cap_conn *conn,
5356 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5357 u8 *data)
5358{
5359 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5360 struct l2cap_le_conn_rsp rsp;
5361 struct l2cap_chan *chan, *pchan;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005362 u16 dcid, scid, credits, mtu, mps;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005363 __le16 psm;
5364 u8 result;
5365
5366 if (cmd_len != sizeof(*req))
5367 return -EPROTO;
5368
5369 scid = __le16_to_cpu(req->scid);
5370 mtu = __le16_to_cpu(req->mtu);
5371 mps = __le16_to_cpu(req->mps);
5372 psm = req->psm;
5373 dcid = 0;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005374 credits = 0;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005375
5376 if (mtu < 23 || mps < 23)
5377 return -EPROTO;
5378
5379 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5380 scid, mtu, mps);
5381
5382 /* Check if we have socket listening on psm */
5383 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5384 &conn->hcon->dst, LE_LINK);
5385 if (!pchan) {
5386 result = L2CAP_CR_BAD_PSM;
5387 chan = NULL;
5388 goto response;
5389 }
5390
5391 mutex_lock(&conn->chan_lock);
5392 l2cap_chan_lock(pchan);
5393
5394 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5395 result = L2CAP_CR_AUTHENTICATION;
5396 chan = NULL;
5397 goto response_unlock;
5398 }
5399
5400 /* Check if we already have channel with that dcid */
5401 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5402 result = L2CAP_CR_NO_MEM;
5403 chan = NULL;
5404 goto response_unlock;
5405 }
5406
5407 chan = pchan->ops->new_connection(pchan);
5408 if (!chan) {
5409 result = L2CAP_CR_NO_MEM;
5410 goto response_unlock;
5411 }
5412
Johan Hedberg0ce43ce2013-12-05 14:55:33 +02005413 l2cap_le_flowctl_init(chan);
5414
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005415 bacpy(&chan->src, &conn->hcon->src);
5416 bacpy(&chan->dst, &conn->hcon->dst);
5417 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5418 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5419 chan->psm = psm;
5420 chan->dcid = scid;
5421 chan->omtu = mtu;
5422 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005423 chan->tx_credits = __le16_to_cpu(req->credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005424
5425 __l2cap_chan_add(conn, chan);
5426 dcid = chan->scid;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005427 credits = chan->rx_credits;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005428
5429 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5430
5431 chan->ident = cmd->ident;
5432
5433 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5434 l2cap_state_change(chan, BT_CONNECT2);
Johan Hedberg434714d2014-09-01 09:45:03 +03005435 /* The following result value is actually not defined
5436 * for LE CoC but we use it to let the function know
5437 * that it should bail out after doing its cleanup
5438 * instead of sending a response.
5439 */
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005440 result = L2CAP_CR_PEND;
5441 chan->ops->defer(chan);
5442 } else {
5443 l2cap_chan_ready(chan);
5444 result = L2CAP_CR_SUCCESS;
5445 }
5446
5447response_unlock:
5448 l2cap_chan_unlock(pchan);
5449 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03005450 l2cap_chan_put(pchan);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005451
5452 if (result == L2CAP_CR_PEND)
5453 return 0;
5454
5455response:
5456 if (chan) {
5457 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02005458 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005459 } else {
5460 rsp.mtu = 0;
5461 rsp.mps = 0;
5462 }
5463
5464 rsp.dcid = cpu_to_le16(dcid);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005465 rsp.credits = cpu_to_le16(credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005466 rsp.result = cpu_to_le16(result);
5467
5468 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5469
5470 return 0;
5471}
5472
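/* LE Flow Control Credit packet.  The peer grants additional TX credits
 * for a dynamic channel; the total of outstanding credits may never
 * exceed 65535 (LE_FLOWCTL_MAX_CREDITS).  Illustrative example: a peer
 * that has already granted 10 credits may grant at most 65525 more, and
 * anything beyond that is a protocol violation that disconnects the
 * channel.  Queued SDU fragments are transmitted while credits remain.
 */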
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005473static inline int l2cap_le_credits(struct l2cap_conn *conn,
5474 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5475 u8 *data)
5476{
5477 struct l2cap_le_credits *pkt;
5478 struct l2cap_chan *chan;
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005479 u16 cid, credits, max_credits;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005480
5481 if (cmd_len != sizeof(*pkt))
5482 return -EPROTO;
5483
5484 pkt = (struct l2cap_le_credits *) data;
5485 cid = __le16_to_cpu(pkt->cid);
5486 credits = __le16_to_cpu(pkt->credits);
5487
5488 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5489
5490 chan = l2cap_get_chan_by_dcid(conn, cid);
5491 if (!chan)
5492 return -EBADSLT;
5493
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005494 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5495 if (credits > max_credits) {
5496 BT_ERR("LE credits overflow");
5497 l2cap_send_disconn_req(chan, ECONNRESET);
Martin Townsendee930532014-10-13 19:24:45 +01005498 l2cap_chan_unlock(chan);
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005499
5500 /* Return 0 so that we don't trigger an unnecessary
5501 * command reject packet.
5502 */
5503 return 0;
5504 }
5505
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005506 chan->tx_credits += credits;
5507
5508 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5509 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5510 chan->tx_credits--;
5511 }
5512
5513 if (chan->tx_credits)
5514 chan->ops->resume(chan);
5515
5516 l2cap_chan_unlock(chan);
5517
5518 return 0;
5519}
5520
Johan Hedberg71fb4192013-12-10 10:52:48 +02005521static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5522 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5523 u8 *data)
5524{
5525 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5526 struct l2cap_chan *chan;
5527
5528 if (cmd_len < sizeof(*rej))
5529 return -EPROTO;
5530
5531 mutex_lock(&conn->chan_lock);
5532
5533 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5534 if (!chan)
5535 goto done;
5536
5537 l2cap_chan_lock(chan);
5538 l2cap_chan_del(chan, ECONNREFUSED);
5539 l2cap_chan_unlock(chan);
5540
5541done:
5542 mutex_unlock(&conn->chan_lock);
5543 return 0;
5544}
5545
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005546static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
Johan Hedberg203e6392013-05-15 10:07:15 +03005547 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5548 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005549{
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005550 int err = 0;
5551
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005552 switch (cmd->code) {
5553 case L2CAP_COMMAND_REJ:
Johan Hedberg71fb4192013-12-10 10:52:48 +02005554 l2cap_le_command_rej(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005555 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005556
5557 case L2CAP_CONN_PARAM_UPDATE_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005558 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5559 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005560
5561 case L2CAP_CONN_PARAM_UPDATE_RSP:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005562 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005563
Johan Hedbergf1496de2013-05-13 14:15:56 +03005564 case L2CAP_LE_CONN_RSP:
5565 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005566 break;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005567
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005568 case L2CAP_LE_CONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005569 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5570 break;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005571
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005572 case L2CAP_LE_CREDITS:
5573 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5574 break;
5575
Johan Hedberg3defe012013-05-15 10:16:06 +03005576 case L2CAP_DISCONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005577 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5578 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005579
5580 case L2CAP_DISCONN_RSP:
5581 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005582 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005583
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005584 default:
5585 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005586 err = -EINVAL;
5587 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005588 }
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005589
5590 return err;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005591}
5592
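/* LE signaling channel.  Unlike BR/EDR, an LE signaling frame carries a
 * single command, so no loop is needed here.  Frames on the wrong link
 * type, truncated headers and length mismatches are silently dropped; a
 * handler error is answered with a Command Reject.
 */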
Johan Hedbergc5623552013-04-29 19:35:33 +03005593static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5594 struct sk_buff *skb)
5595{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005596 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005597 struct l2cap_cmd_hdr *cmd;
5598 u16 len;
Johan Hedbergc5623552013-04-29 19:35:33 +03005599 int err;
5600
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005601 if (hcon->type != LE_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005602 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005603
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005604 if (skb->len < L2CAP_CMD_HDR_SIZE)
5605 goto drop;
Johan Hedbergc5623552013-04-29 19:35:33 +03005606
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005607 cmd = (void *) skb->data;
5608 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
Johan Hedbergc5623552013-04-29 19:35:33 +03005609
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005610 len = le16_to_cpu(cmd->len);
Johan Hedbergc5623552013-04-29 19:35:33 +03005611
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005612 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
Johan Hedbergc5623552013-04-29 19:35:33 +03005613
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005614 if (len != skb->len || !cmd->ident) {
5615 BT_DBG("corrupted command");
5616 goto drop;
5617 }
Johan Hedbergc5623552013-04-29 19:35:33 +03005618
Johan Hedberg203e6392013-05-15 10:07:15 +03005619 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005620 if (err) {
5621 struct l2cap_cmd_rej_unk rej;
Johan Hedbergc5623552013-04-29 19:35:33 +03005622
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005623 BT_ERR("Wrong link type (%d)", err);
Johan Hedbergc5623552013-04-29 19:35:33 +03005624
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005625 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005626 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5627 sizeof(rej), &rej);
Johan Hedbergc5623552013-04-29 19:35:33 +03005628 }
5629
Marcel Holtmann3b166292013-10-02 08:28:21 -07005630drop:
Johan Hedbergc5623552013-04-29 19:35:33 +03005631 kfree_skb(skb);
5632}
5633
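/* BR/EDR signaling channel.  A single C-frame may contain several
 * commands, so the buffer is walked command by command; malformed
 * commands terminate the loop and handler failures are answered with a
 * Command Reject (command not understood).
 */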
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005634static inline void l2cap_sig_channel(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005635 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005636{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005637 struct hci_conn *hcon = conn->hcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005638 u8 *data = skb->data;
5639 int len = skb->len;
5640 struct l2cap_cmd_hdr cmd;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005641 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005642
5643 l2cap_raw_recv(conn, skb);
5644
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005645 if (hcon->type != ACL_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005646 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005647
Linus Torvalds1da177e2005-04-16 15:20:36 -07005648 while (len >= L2CAP_CMD_HDR_SIZE) {
Al Viro88219a02007-07-29 00:17:25 -07005649 u16 cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005650 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5651 data += L2CAP_CMD_HDR_SIZE;
5652 len -= L2CAP_CMD_HDR_SIZE;
5653
Al Viro88219a02007-07-29 00:17:25 -07005654 cmd_len = le16_to_cpu(cmd.len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005655
Gustavo Padovan2d792812012-10-06 10:07:01 +01005656 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5657 cmd.ident);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005658
Al Viro88219a02007-07-29 00:17:25 -07005659 if (cmd_len > len || !cmd.ident) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005660 BT_DBG("corrupted command");
5661 break;
5662 }
5663
Johan Hedbergc5623552013-04-29 19:35:33 +03005664 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005665 if (err) {
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03005666 struct l2cap_cmd_rej_unk rej;
Gustavo F. Padovan2c6d1a22011-03-23 14:38:32 -03005667
5668 BT_ERR("Wrong link type (%d)", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005669
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005670 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Gustavo Padovan2d792812012-10-06 10:07:01 +01005671 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5672 sizeof(rej), &rej);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005673 }
5674
Al Viro88219a02007-07-29 00:17:25 -07005675 data += cmd_len;
5676 len -= cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005677 }
5678
Marcel Holtmann3b166292013-10-02 08:28:21 -07005679drop:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005680 kfree_skb(skb);
5681}
5682
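/* Verify the FCS (CRC-16) of an ERTM or streaming mode frame when FCS is
 * in use.  The checksum covers the basic L2CAP header (length and CID),
 * the enhanced or extended control field and the payload; the two
 * trailing FCS octets are trimmed off and compared against the CRC
 * computed over the received bytes.
 */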
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005683static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005684{
5685 u16 our_fcs, rcv_fcs;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03005686 int hdr_size;
5687
5688 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5689 hdr_size = L2CAP_EXT_HDR_SIZE;
5690 else
5691 hdr_size = L2CAP_ENH_HDR_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005692
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005693 if (chan->fcs == L2CAP_FCS_CRC16) {
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005694 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005695 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5696 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5697
5698 if (our_fcs != rcv_fcs)
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005699 return -EBADMSG;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005700 }
5701 return 0;
5702}
5703
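/* Send the F-bit response to a poll from the peer: report local busy
 * with an RNR, otherwise push any pending I-frames and, if the F-bit has
 * still not gone out in an I-frame or S-frame, finish with an RR.
 */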
Mat Martineau6ea00482012-05-17 20:53:52 -07005704static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005705{
Mat Martineaue31f7632012-05-17 20:53:41 -07005706 struct l2cap_ctrl control;
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005707
Mat Martineaue31f7632012-05-17 20:53:41 -07005708 BT_DBG("chan %p", chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005709
Mat Martineaue31f7632012-05-17 20:53:41 -07005710 memset(&control, 0, sizeof(control));
5711 control.sframe = 1;
5712 control.final = 1;
5713 control.reqseq = chan->buffer_seq;
5714 set_bit(CONN_SEND_FBIT, &chan->conn_state);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005715
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005716 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
Mat Martineaue31f7632012-05-17 20:53:41 -07005717 control.super = L2CAP_SUPER_RNR;
5718 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005719 }
5720
Mat Martineaue31f7632012-05-17 20:53:41 -07005721 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5722 chan->unacked_frames > 0)
5723 __set_retrans_timer(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005724
Mat Martineaue31f7632012-05-17 20:53:41 -07005725 /* Send pending iframes */
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03005726 l2cap_ertm_send(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005727
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005728 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
Mat Martineaue31f7632012-05-17 20:53:41 -07005729 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5730 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5731 * send it now.
5732 */
5733 control.super = L2CAP_SUPER_RR;
5734 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005735 }
5736}
5737
Gustavo Padovan2d792812012-10-06 10:07:01 +01005738static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5739 struct sk_buff **last_frag)
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005740{
Mat Martineau84084a32011-07-22 14:54:00 -07005741 /* skb->len reflects data in skb as well as all fragments
5742 * skb->data_len reflects only data in fragments
5743 */
5744 if (!skb_has_frag_list(skb))
5745 skb_shinfo(skb)->frag_list = new_frag;
5746
5747 new_frag->next = NULL;
5748
5749 (*last_frag)->next = new_frag;
5750 *last_frag = new_frag;
5751
5752 skb->len += new_frag->len;
5753 skb->data_len += new_frag->len;
5754 skb->truesize += new_frag->truesize;
5755}
5756
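/* Reassemble an SDU from received I-frames according to the SAR bits.
 * Unsegmented frames are delivered directly; a start fragment records
 * the SDU length and starts the fragment list, continuation and end
 * fragments are appended, and the complete SDU is handed to the
 * channel's recv callback.  Any inconsistency (oversized SDU, wrong
 * total length, fragment without a start) discards the partial SDU.
 */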
Mat Martineau4b51dae92012-05-17 20:53:37 -07005757static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5758 struct l2cap_ctrl *control)
Mat Martineau84084a32011-07-22 14:54:00 -07005759{
5760 int err = -EINVAL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005761
Mat Martineau4b51dae92012-05-17 20:53:37 -07005762 switch (control->sar) {
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005763 case L2CAP_SAR_UNSEGMENTED:
Mat Martineau84084a32011-07-22 14:54:00 -07005764 if (chan->sdu)
5765 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005766
Gustavo Padovan80b98022012-05-27 22:27:51 -03005767 err = chan->ops->recv(chan, skb);
Mat Martineau84084a32011-07-22 14:54:00 -07005768 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005769
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005770 case L2CAP_SAR_START:
Mat Martineau84084a32011-07-22 14:54:00 -07005771 if (chan->sdu)
5772 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005773
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005774 chan->sdu_len = get_unaligned_le16(skb->data);
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005775 skb_pull(skb, L2CAP_SDULEN_SIZE);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005776
Mat Martineau84084a32011-07-22 14:54:00 -07005777 if (chan->sdu_len > chan->imtu) {
5778 err = -EMSGSIZE;
5779 break;
5780 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005781
Mat Martineau84084a32011-07-22 14:54:00 -07005782 if (skb->len >= chan->sdu_len)
5783 break;
5784
5785 chan->sdu = skb;
5786 chan->sdu_last_frag = skb;
5787
5788 skb = NULL;
5789 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005790 break;
5791
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005792 case L2CAP_SAR_CONTINUE:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005793 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005794 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005795
Mat Martineau84084a32011-07-22 14:54:00 -07005796 append_skb_frag(chan->sdu, skb,
5797 &chan->sdu_last_frag);
5798 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005799
Mat Martineau84084a32011-07-22 14:54:00 -07005800 if (chan->sdu->len >= chan->sdu_len)
5801 break;
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005802
Mat Martineau84084a32011-07-22 14:54:00 -07005803 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005804 break;
5805
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005806 case L2CAP_SAR_END:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005807 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005808 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005809
Mat Martineau84084a32011-07-22 14:54:00 -07005810 append_skb_frag(chan->sdu, skb,
5811 &chan->sdu_last_frag);
5812 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005813
Mat Martineau84084a32011-07-22 14:54:00 -07005814 if (chan->sdu->len != chan->sdu_len)
5815 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005816
Gustavo Padovan80b98022012-05-27 22:27:51 -03005817 err = chan->ops->recv(chan, chan->sdu);
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005818
Mat Martineau84084a32011-07-22 14:54:00 -07005819 if (!err) {
5820 /* Reassembly complete */
5821 chan->sdu = NULL;
5822 chan->sdu_last_frag = NULL;
5823 chan->sdu_len = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005824 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005825 break;
5826 }
5827
Mat Martineau84084a32011-07-22 14:54:00 -07005828 if (err) {
5829 kfree_skb(skb);
5830 kfree_skb(chan->sdu);
5831 chan->sdu = NULL;
5832 chan->sdu_last_frag = NULL;
5833 chan->sdu_len = 0;
5834 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005835
Mat Martineau84084a32011-07-22 14:54:00 -07005836 return err;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005837}
5838
Mat Martineau32b32732012-10-23 15:24:11 -07005839static int l2cap_resegment(struct l2cap_chan *chan)
5840{
5841 /* Placeholder */
5842 return 0;
5843}
5844
Mat Martineaue3281402011-07-07 09:39:02 -07005845void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
Gustavo F. Padovan712132eb2010-06-21 19:39:50 -03005846{
Mat Martineau61aa4f52012-05-17 20:53:40 -07005847 u8 event;
5848
5849 if (chan->mode != L2CAP_MODE_ERTM)
5850 return;
5851
5852 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03005853 l2cap_tx(chan, NULL, NULL, event);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005854}
5855
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005856static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5857{
Mat Martineau63838722012-05-17 20:53:45 -07005858 int err = 0;
5859 /* Pass sequential frames to l2cap_reassemble_sdu()
5860 * until a gap is encountered.
5861 */
5862
5863 BT_DBG("chan %p", chan);
5864
5865 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5866 struct sk_buff *skb;
5867 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5868 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5869
5870 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5871
5872 if (!skb)
5873 break;
5874
5875 skb_unlink(skb, &chan->srej_q);
5876 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5877 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5878 if (err)
5879 break;
5880 }
5881
5882 if (skb_queue_empty(&chan->srej_q)) {
5883 chan->rx_state = L2CAP_RX_STATE_RECV;
5884 l2cap_send_ack(chan);
5885 }
5886
5887 return err;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005888}
5889
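/* Handle a received SREJ S-frame.  Requesting a sequence number that has
 * not been sent yet is invalid and forces a disconnect, as does
 * exceeding the retransmit limit; otherwise the requested I-frame is
 * retransmitted, honouring the P/F bits so a later F-bit response is not
 * retransmitted twice.
 */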
5890static void l2cap_handle_srej(struct l2cap_chan *chan,
5891 struct l2cap_ctrl *control)
5892{
Mat Martineauf80842a2012-05-17 20:53:46 -07005893 struct sk_buff *skb;
5894
5895 BT_DBG("chan %p, control %p", chan, control);
5896
5897 if (control->reqseq == chan->next_tx_seq) {
5898 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005899 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07005900 return;
5901 }
5902
5903 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5904
5905 if (skb == NULL) {
5906 BT_DBG("Seq %d not available for retransmission",
5907 control->reqseq);
5908 return;
5909 }
5910
5911 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5912 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005913 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07005914 return;
5915 }
5916
5917 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5918
5919 if (control->poll) {
5920 l2cap_pass_to_tx(chan, control);
5921
5922 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5923 l2cap_retransmit(chan, control);
5924 l2cap_ertm_send(chan);
5925
5926 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5927 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5928 chan->srej_save_reqseq = control->reqseq;
5929 }
5930 } else {
5931 l2cap_pass_to_tx_fbit(chan, control);
5932
5933 if (control->final) {
5934 if (chan->srej_save_reqseq != control->reqseq ||
5935 !test_and_clear_bit(CONN_SREJ_ACT,
5936 &chan->conn_state))
5937 l2cap_retransmit(chan, control);
5938 } else {
5939 l2cap_retransmit(chan, control);
5940 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5941 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5942 chan->srej_save_reqseq = control->reqseq;
5943 }
5944 }
5945 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005946}
5947
5948static void l2cap_handle_rej(struct l2cap_chan *chan,
5949 struct l2cap_ctrl *control)
5950{
Mat Martineaufcd289d2012-05-17 20:53:47 -07005951 struct sk_buff *skb;
5952
5953 BT_DBG("chan %p, control %p", chan, control);
5954
5955 if (control->reqseq == chan->next_tx_seq) {
5956 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005957 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07005958 return;
5959 }
5960
5961 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5962
5963 if (chan->max_tx && skb &&
5964 bt_cb(skb)->control.retries >= chan->max_tx) {
5965 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005966 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07005967 return;
5968 }
5969
5970 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5971
5972 l2cap_pass_to_tx(chan, control);
5973
5974 if (control->final) {
5975 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5976 l2cap_retransmit_all(chan, control);
5977 } else {
5978 l2cap_retransmit_all(chan, control);
5979 l2cap_ertm_send(chan);
5980 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5981 set_bit(CONN_REJ_ACT, &chan->conn_state);
5982 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005983}
5984
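/* Classify the TxSeq of a received I-frame relative to the expected
 * sequence number, the last acknowledged sequence number and the TX
 * window.  The result tells the RX state machine whether the frame is
 * the expected one, a duplicate, an out-of-sequence frame that needs
 * SREJs, or invalid (possibly ignorable, see the "double poll" note
 * below).
 */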
Mat Martineau4b51dae92012-05-17 20:53:37 -07005985static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5986{
5987 BT_DBG("chan %p, txseq %d", chan, txseq);
5988
5989 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5990 chan->expected_tx_seq);
5991
5992 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5993 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
Gustavo Padovan2d792812012-10-06 10:07:01 +01005994 chan->tx_win) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07005995 /* See notes below regarding "double poll" and
5996 * invalid packets.
5997 */
5998 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5999 BT_DBG("Invalid/Ignore - after SREJ");
6000 return L2CAP_TXSEQ_INVALID_IGNORE;
6001 } else {
6002 BT_DBG("Invalid - in window after SREJ sent");
6003 return L2CAP_TXSEQ_INVALID;
6004 }
6005 }
6006
6007 if (chan->srej_list.head == txseq) {
6008 BT_DBG("Expected SREJ");
6009 return L2CAP_TXSEQ_EXPECTED_SREJ;
6010 }
6011
6012 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6013 BT_DBG("Duplicate SREJ - txseq already stored");
6014 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6015 }
6016
6017 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6018 BT_DBG("Unexpected SREJ - not requested");
6019 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6020 }
6021 }
6022
6023 if (chan->expected_tx_seq == txseq) {
6024 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6025 chan->tx_win) {
6026 BT_DBG("Invalid - txseq outside tx window");
6027 return L2CAP_TXSEQ_INVALID;
6028 } else {
6029 BT_DBG("Expected");
6030 return L2CAP_TXSEQ_EXPECTED;
6031 }
6032 }
6033
6034 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
Gustavo Padovan2d792812012-10-06 10:07:01 +01006035 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07006036 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6037 return L2CAP_TXSEQ_DUPLICATE;
6038 }
6039
6040 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6041 /* A source of invalid packets is a "double poll" condition,
6042 * where delays cause us to send multiple poll packets. If
6043 * the remote stack receives and processes both polls,
6044 * sequence numbers can wrap around in such a way that a
6045 * resent frame has a sequence number that looks like new data
6046 * with a sequence gap. This would trigger an erroneous SREJ
6047 * request.
6048 *
6049 * Fortunately, this is impossible with a tx window that's
6050 * less than half of the maximum sequence number, which allows
6051 * invalid frames to be safely ignored.
6052 *
6053 * With tx window sizes greater than half of the tx window
6054 * maximum, the frame is invalid and cannot be ignored. This
6055 * causes a disconnect.
6056 */
6057
6058 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6059 BT_DBG("Invalid/Ignore - txseq outside tx window");
6060 return L2CAP_TXSEQ_INVALID_IGNORE;
6061 } else {
6062 BT_DBG("Invalid - txseq outside tx window");
6063 return L2CAP_TXSEQ_INVALID;
6064 }
6065 } else {
6066 BT_DBG("Unexpected - txseq indicates missing frames");
6067 return L2CAP_TXSEQ_UNEXPECTED;
6068 }
6069}
6070
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006071static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6072 struct l2cap_ctrl *control,
6073 struct sk_buff *skb, u8 event)
6074{
6075 int err = 0;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006076 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006077
6078 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6079 event);
6080
6081 switch (event) {
6082 case L2CAP_EV_RECV_IFRAME:
6083 switch (l2cap_classify_txseq(chan, control->txseq)) {
6084 case L2CAP_TXSEQ_EXPECTED:
6085 l2cap_pass_to_tx(chan, control);
6086
6087 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6088 BT_DBG("Busy, discarding expected seq %d",
6089 control->txseq);
6090 break;
6091 }
6092
6093 chan->expected_tx_seq = __next_seq(chan,
6094 control->txseq);
6095
6096 chan->buffer_seq = chan->expected_tx_seq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006097 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006098
6099 err = l2cap_reassemble_sdu(chan, skb, control);
6100 if (err)
6101 break;
6102
6103 if (control->final) {
6104 if (!test_and_clear_bit(CONN_REJ_ACT,
6105 &chan->conn_state)) {
6106 control->final = 0;
6107 l2cap_retransmit_all(chan, control);
6108 l2cap_ertm_send(chan);
6109 }
6110 }
6111
6112 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6113 l2cap_send_ack(chan);
6114 break;
6115 case L2CAP_TXSEQ_UNEXPECTED:
6116 l2cap_pass_to_tx(chan, control);
6117
6118 /* Can't issue SREJ frames in the local busy state.
6119			 * Drop this frame; it will be seen as missing
6120 * when local busy is exited.
6121 */
6122 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6123 BT_DBG("Busy, discarding unexpected seq %d",
6124 control->txseq);
6125 break;
6126 }
6127
6128 /* There was a gap in the sequence, so an SREJ
6129 * must be sent for each missing frame. The
6130 * current frame is stored for later use.
6131 */
6132 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006133 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006134 BT_DBG("Queued %p (queue len %d)", skb,
6135 skb_queue_len(&chan->srej_q));
6136
6137 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6138 l2cap_seq_list_clear(&chan->srej_list);
6139 l2cap_send_srej(chan, control->txseq);
6140
6141 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6142 break;
6143 case L2CAP_TXSEQ_DUPLICATE:
6144 l2cap_pass_to_tx(chan, control);
6145 break;
6146 case L2CAP_TXSEQ_INVALID_IGNORE:
6147 break;
6148 case L2CAP_TXSEQ_INVALID:
6149 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006150 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006151 break;
6152 }
6153 break;
6154 case L2CAP_EV_RECV_RR:
6155 l2cap_pass_to_tx(chan, control);
6156 if (control->final) {
6157 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6158
Mat Martineaue6a3ee62012-10-23 15:24:22 -07006159 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6160 !__chan_is_moving(chan)) {
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006161 control->final = 0;
6162 l2cap_retransmit_all(chan, control);
6163 }
6164
6165 l2cap_ertm_send(chan);
6166 } else if (control->poll) {
6167 l2cap_send_i_or_rr_or_rnr(chan);
6168 } else {
6169 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6170 &chan->conn_state) &&
6171 chan->unacked_frames)
6172 __set_retrans_timer(chan);
6173
6174 l2cap_ertm_send(chan);
6175 }
6176 break;
6177 case L2CAP_EV_RECV_RNR:
6178 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6179 l2cap_pass_to_tx(chan, control);
6180 if (control && control->poll) {
6181 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6182 l2cap_send_rr_or_rnr(chan, 0);
6183 }
6184 __clear_retrans_timer(chan);
6185 l2cap_seq_list_clear(&chan->retrans_list);
6186 break;
6187 case L2CAP_EV_RECV_REJ:
6188 l2cap_handle_rej(chan, control);
6189 break;
6190 case L2CAP_EV_RECV_SREJ:
6191 l2cap_handle_srej(chan, control);
6192 break;
6193 default:
6194 break;
6195 }
6196
6197 if (skb && !skb_in_use) {
6198 BT_DBG("Freeing %p", skb);
6199 kfree_skb(skb);
6200 }
6201
6202 return err;
6203}
6204
6205static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6206 struct l2cap_ctrl *control,
6207 struct sk_buff *skb, u8 event)
6208{
6209 int err = 0;
6210 u16 txseq = control->txseq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006211 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006212
6213 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6214 event);
6215
6216 switch (event) {
6217 case L2CAP_EV_RECV_IFRAME:
6218 switch (l2cap_classify_txseq(chan, txseq)) {
6219 case L2CAP_TXSEQ_EXPECTED:
6220 /* Keep frame for reassembly later */
6221 l2cap_pass_to_tx(chan, control);
6222 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006223 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006224 BT_DBG("Queued %p (queue len %d)", skb,
6225 skb_queue_len(&chan->srej_q));
6226
6227 chan->expected_tx_seq = __next_seq(chan, txseq);
6228 break;
6229 case L2CAP_TXSEQ_EXPECTED_SREJ:
6230 l2cap_seq_list_pop(&chan->srej_list);
6231
6232 l2cap_pass_to_tx(chan, control);
6233 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006234 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006235 BT_DBG("Queued %p (queue len %d)", skb,
6236 skb_queue_len(&chan->srej_q));
6237
6238 err = l2cap_rx_queued_iframes(chan);
6239 if (err)
6240 break;
6241
6242 break;
6243 case L2CAP_TXSEQ_UNEXPECTED:
6244 /* Got a frame that can't be reassembled yet.
6245 * Save it for later, and send SREJs to cover
6246 * the missing frames.
6247 */
6248 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006249 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006250 BT_DBG("Queued %p (queue len %d)", skb,
6251 skb_queue_len(&chan->srej_q));
6252
6253 l2cap_pass_to_tx(chan, control);
6254 l2cap_send_srej(chan, control->txseq);
6255 break;
6256 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6257 /* This frame was requested with an SREJ, but
6258 * some expected retransmitted frames are
6259 * missing. Request retransmission of missing
6260 * SREJ'd frames.
6261 */
6262 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006263 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006264 BT_DBG("Queued %p (queue len %d)", skb,
6265 skb_queue_len(&chan->srej_q));
6266
6267 l2cap_pass_to_tx(chan, control);
6268 l2cap_send_srej_list(chan, control->txseq);
6269 break;
6270 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6271 /* We've already queued this frame. Drop this copy. */
6272 l2cap_pass_to_tx(chan, control);
6273 break;
6274 case L2CAP_TXSEQ_DUPLICATE:
6275 /* Expecting a later sequence number, so this frame
6276 * was already received. Ignore it completely.
6277 */
6278 break;
6279 case L2CAP_TXSEQ_INVALID_IGNORE:
6280 break;
6281 case L2CAP_TXSEQ_INVALID:
6282 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006283 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006284 break;
6285 }
6286 break;
6287 case L2CAP_EV_RECV_RR:
6288 l2cap_pass_to_tx(chan, control);
6289 if (control->final) {
6290 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6291
6292 if (!test_and_clear_bit(CONN_REJ_ACT,
6293 &chan->conn_state)) {
6294 control->final = 0;
6295 l2cap_retransmit_all(chan, control);
6296 }
6297
6298 l2cap_ertm_send(chan);
6299 } else if (control->poll) {
6300 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6301 &chan->conn_state) &&
6302 chan->unacked_frames) {
6303 __set_retrans_timer(chan);
6304 }
6305
6306 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6307 l2cap_send_srej_tail(chan);
6308 } else {
6309 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6310 &chan->conn_state) &&
6311 chan->unacked_frames)
6312 __set_retrans_timer(chan);
6313
6314 l2cap_send_ack(chan);
6315 }
6316 break;
6317 case L2CAP_EV_RECV_RNR:
6318 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6319 l2cap_pass_to_tx(chan, control);
6320 if (control->poll) {
6321 l2cap_send_srej_tail(chan);
6322 } else {
6323 struct l2cap_ctrl rr_control;
6324 memset(&rr_control, 0, sizeof(rr_control));
6325 rr_control.sframe = 1;
6326 rr_control.super = L2CAP_SUPER_RR;
6327 rr_control.reqseq = chan->buffer_seq;
6328 l2cap_send_sframe(chan, &rr_control);
6329 }
6330
6331 break;
6332 case L2CAP_EV_RECV_REJ:
6333 l2cap_handle_rej(chan, control);
6334 break;
6335 case L2CAP_EV_RECV_SREJ:
6336 l2cap_handle_srej(chan, control);
6337 break;
6338 }
6339
6340 if (skb && !skb_in_use) {
6341 BT_DBG("Freeing %p", skb);
6342 kfree_skb(skb);
6343 }
6344
6345 return err;
6346}
6347
Mat Martineau32b32732012-10-23 15:24:11 -07006348static int l2cap_finish_move(struct l2cap_chan *chan)
6349{
6350 BT_DBG("chan %p", chan);
6351
6352 chan->rx_state = L2CAP_RX_STATE_RECV;
6353
6354 if (chan->hs_hcon)
6355 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6356 else
6357 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6358
6359 return l2cap_resegment(chan);
6360}
6361
6362static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6363 struct l2cap_ctrl *control,
6364 struct sk_buff *skb, u8 event)
6365{
6366 int err;
6367
6368 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6369 event);
6370
6371 if (!control->poll)
6372 return -EPROTO;
6373
6374 l2cap_process_reqseq(chan, control->reqseq);
6375
6376 if (!skb_queue_empty(&chan->tx_q))
6377 chan->tx_send_head = skb_peek(&chan->tx_q);
6378 else
6379 chan->tx_send_head = NULL;
6380
6381 /* Rewind next_tx_seq to the point expected
6382 * by the receiver.
6383 */
6384 chan->next_tx_seq = control->reqseq;
6385 chan->unacked_frames = 0;
6386
6387 err = l2cap_finish_move(chan);
6388 if (err)
6389 return err;
6390
6391 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6392 l2cap_send_i_or_rr_or_rnr(chan);
6393
6394 if (event == L2CAP_EV_RECV_IFRAME)
6395 return -EPROTO;
6396
6397 return l2cap_rx_state_recv(chan, control, NULL, event);
6398}
6399
6400static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6401 struct l2cap_ctrl *control,
6402 struct sk_buff *skb, u8 event)
6403{
6404 int err;
6405
6406 if (!control->final)
6407 return -EPROTO;
6408
6409 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6410
6411 chan->rx_state = L2CAP_RX_STATE_RECV;
6412 l2cap_process_reqseq(chan, control->reqseq);
6413
6414 if (!skb_queue_empty(&chan->tx_q))
6415 chan->tx_send_head = skb_peek(&chan->tx_q);
6416 else
6417 chan->tx_send_head = NULL;
6418
6419 /* Rewind next_tx_seq to the point expected
6420 * by the receiver.
6421 */
6422 chan->next_tx_seq = control->reqseq;
6423 chan->unacked_frames = 0;
6424
6425 if (chan->hs_hcon)
6426 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6427 else
6428 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6429
6430 err = l2cap_resegment(chan);
6431
6432 if (!err)
6433 err = l2cap_rx_state_recv(chan, control, skb, event);
6434
6435 return err;
6436}
6437
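/* A ReqSeq value is only valid if it acknowledges frames that have
 * actually been sent but not yet acked.  Illustrative example: with
 * expected_ack_seq == 5 and next_tx_seq == 9 there are 4 unacked frames,
 * so reqseq values 5..9 are accepted and anything else is treated as a
 * protocol error by the caller.
 */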
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006438static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6439{
6440 /* Make sure reqseq is for a packet that has been sent but not acked */
6441 u16 unacked;
6442
6443 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6444 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6445}
6446
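/* Top-level ERTM receive state machine: dispatch the frame to the
 * handler for the current rx_state after sanity-checking reqseq. An
 * out-of-range reqseq is a protocol violation and tears the channel
 * down with ECONNRESET.
 */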
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006447static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6448 struct sk_buff *skb, u8 event)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006449{
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006450 int err = 0;
6451
6452 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6453 control, skb, event, chan->rx_state);
6454
6455 if (__valid_reqseq(chan, control->reqseq)) {
6456 switch (chan->rx_state) {
6457 case L2CAP_RX_STATE_RECV:
6458 err = l2cap_rx_state_recv(chan, control, skb, event);
6459 break;
6460 case L2CAP_RX_STATE_SREJ_SENT:
6461 err = l2cap_rx_state_srej_sent(chan, control, skb,
6462 event);
6463 break;
Mat Martineau32b32732012-10-23 15:24:11 -07006464 case L2CAP_RX_STATE_WAIT_P:
6465 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6466 break;
6467 case L2CAP_RX_STATE_WAIT_F:
6468 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6469 break;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006470 default:
6471 /* shut it down */
6472 break;
6473 }
6474 } else {
 6475 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6476 control->reqseq, chan->next_tx_seq,
6477 chan->expected_ack_seq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006478 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006479 }
6480
6481 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006482}
6483
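/* Streaming mode receive path: in-sequence I-frames are reassembled
 * into SDUs, anything else simply discards the partially built SDU and
 * the frame itself, since streaming mode never retransmits.
 */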
6484static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6485 struct sk_buff *skb)
6486{
Mat Martineau4b51dae92012-05-17 20:53:37 -07006487 int err = 0;
6488
6489 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6490 chan->rx_state);
6491
6492 if (l2cap_classify_txseq(chan, control->txseq) ==
6493 L2CAP_TXSEQ_EXPECTED) {
6494 l2cap_pass_to_tx(chan, control);
6495
6496 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6497 __next_seq(chan, chan->buffer_seq));
6498
6499 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6500
6501 l2cap_reassemble_sdu(chan, skb, control);
6502 } else {
6503 if (chan->sdu) {
6504 kfree_skb(chan->sdu);
6505 chan->sdu = NULL;
6506 }
6507 chan->sdu_last_frag = NULL;
6508 chan->sdu_len = 0;
6509
6510 if (skb) {
6511 BT_DBG("Freeing %p", skb);
6512 kfree_skb(skb);
6513 }
6514 }
6515
6516 chan->last_acked_seq = control->txseq;
6517 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6518
6519 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006520}
6521
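/* Common entry point for ERTM and streaming mode data: unpack the
 * control field, verify the FCS and the PDU size against the MPS, then
 * route I-frames into the receive state machine (or the streaming
 * handler) and map S-frames to their RR/REJ/RNR/SREJ events. Protocol
 * errors result in a disconnect request with ECONNRESET.
 */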
6522static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6523{
6524 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6525 u16 len;
6526 u8 event;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006527
Mat Martineaub76bbd62012-04-11 10:48:43 -07006528 __unpack_control(chan, skb);
6529
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006530 len = skb->len;
6531
6532 /*
6533 * We can just drop the corrupted I-frame here.
 6534 * The receiver state machine will miss it and start the proper
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006535 * recovery procedure by asking for retransmission.
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006536 */
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006537 if (l2cap_check_fcs(chan, skb))
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006538 goto drop;
6539
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006540 if (!control->sframe && control->sar == L2CAP_SAR_START)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006541 len -= L2CAP_SDULEN_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006542
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006543 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006544 len -= L2CAP_FCS_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006545
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006546 if (len > chan->mps) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006547 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006548 goto drop;
6549 }
6550
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006551 if (!control->sframe) {
6552 int err;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006553
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006554 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6555 control->sar, control->reqseq, control->final,
6556 control->txseq);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03006557
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006558 /* Validate F-bit - F=0 always valid, F=1 only
6559 * valid in TX WAIT_F
6560 */
6561 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006562 goto drop;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006563
6564 if (chan->mode != L2CAP_MODE_STREAMING) {
6565 event = L2CAP_EV_RECV_IFRAME;
6566 err = l2cap_rx(chan, control, skb, event);
6567 } else {
6568 err = l2cap_stream_rx(chan, control, skb);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006569 }
6570
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006571 if (err)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006572 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006573 } else {
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006574 const u8 rx_func_to_event[4] = {
6575 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6576 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6577 };
6578
6579 /* Only I-frames are expected in streaming mode */
6580 if (chan->mode == L2CAP_MODE_STREAMING)
6581 goto drop;
6582
6583 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6584 control->reqseq, control->final, control->poll,
6585 control->super);
6586
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006587 if (len != 0) {
Andrei Emeltchenko1bb166e2012-11-20 17:16:21 +02006588 BT_ERR("Trailing bytes: %d in sframe", len);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006589 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006590 goto drop;
6591 }
6592
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006593 /* Validate F and P bits */
6594 if (control->final && (control->poll ||
6595 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6596 goto drop;
6597
6598 event = rx_func_to_event[control->super];
6599 if (l2cap_rx(chan, control, skb, event))
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006600 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006601 }
6602
6603 return 0;
6604
6605drop:
6606 kfree_skb(skb);
6607 return 0;
6608}
6609
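/* Return receive credits to the remote side of an LE credit-based
 * channel. Credits are only replenished once fewer than half of the
 * configured maximum are left; e.g. if le_max_credits were 10, nothing
 * is sent while rx_credits >= 5, and at rx_credits == 4 a 6-credit
 * L2CAP_LE_CREDITS packet tops the window back up to 10.
 */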
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006610static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6611{
6612 struct l2cap_conn *conn = chan->conn;
6613 struct l2cap_le_credits pkt;
6614 u16 return_credits;
6615
6616 /* We return more credits to the sender only after the amount of
6617 * credits falls below half of the initial amount.
6618 */
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006619 if (chan->rx_credits >= (le_max_credits + 1) / 2)
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006620 return;
6621
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006622 return_credits = le_max_credits - chan->rx_credits;
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006623
6624 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6625
6626 chan->rx_credits += return_credits;
6627
6628 pkt.cid = cpu_to_le16(chan->scid);
6629 pkt.credits = cpu_to_le16(return_credits);
6630
6631 chan->ident = l2cap_get_ident(conn);
6632
6633 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6634}
6635
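/* LE credit-based flow control receive path: every incoming PDU
 * consumes one credit and may trigger a credit refill. The first PDU of
 * an SDU starts with a 2-byte SDU length; follow-up PDUs are appended
 * until the announced length has been collected and the SDU is handed
 * to the channel's recv callback.
 */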
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006636static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6637{
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006638 int err;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006639
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006640 if (!chan->rx_credits) {
6641 BT_ERR("No credits to receive LE L2CAP data");
Johan Hedbergdfd97742014-01-27 15:11:34 -08006642 l2cap_send_disconn_req(chan, ECONNRESET);
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006643 return -ENOBUFS;
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006644 }
6645
6646 if (chan->imtu < skb->len) {
6647 BT_ERR("Too big LE L2CAP PDU");
6648 return -ENOBUFS;
6649 }
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006650
6651 chan->rx_credits--;
6652 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6653
6654 l2cap_chan_le_send_credits(chan);
6655
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006656 err = 0;
6657
6658 if (!chan->sdu) {
6659 u16 sdu_len;
6660
6661 sdu_len = get_unaligned_le16(skb->data);
6662 skb_pull(skb, L2CAP_SDULEN_SIZE);
6663
6664 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6665 sdu_len, skb->len, chan->imtu);
6666
6667 if (sdu_len > chan->imtu) {
6668 BT_ERR("Too big LE L2CAP SDU length received");
6669 err = -EMSGSIZE;
6670 goto failed;
6671 }
6672
6673 if (skb->len > sdu_len) {
6674 BT_ERR("Too much LE L2CAP data received");
6675 err = -EINVAL;
6676 goto failed;
6677 }
6678
6679 if (skb->len == sdu_len)
6680 return chan->ops->recv(chan, skb);
6681
6682 chan->sdu = skb;
6683 chan->sdu_len = sdu_len;
6684 chan->sdu_last_frag = skb;
6685
6686 return 0;
6687 }
6688
6689 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6690 chan->sdu->len, skb->len, chan->sdu_len);
6691
6692 if (chan->sdu->len + skb->len > chan->sdu_len) {
6693 BT_ERR("Too much LE L2CAP data received");
6694 err = -EINVAL;
6695 goto failed;
6696 }
6697
6698 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6699 skb = NULL;
6700
6701 if (chan->sdu->len == chan->sdu_len) {
6702 err = chan->ops->recv(chan, chan->sdu);
6703 if (!err) {
6704 chan->sdu = NULL;
6705 chan->sdu_last_frag = NULL;
6706 chan->sdu_len = 0;
6707 }
6708 }
6709
6710failed:
6711 if (err) {
6712 kfree_skb(skb);
6713 kfree_skb(chan->sdu);
6714 chan->sdu = NULL;
6715 chan->sdu_last_frag = NULL;
6716 chan->sdu_len = 0;
6717 }
6718
6719 /* We can't return an error here since we took care of the skb
6720 * freeing internally. An error return would cause the caller to
6721 * do a double-free of the skb.
6722 */
6723 return 0;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006724}
6725
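/* Deliver a connection-oriented data packet to the channel identified
 * by its source CID, creating an A2MP channel on demand for
 * L2CAP_CID_A2MP. The payload is then handled according to the channel
 * mode (LE flow control, basic, ERTM or streaming); packets for unknown
 * CIDs or channels that are not connected are dropped.
 */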
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006726static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6727 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006728{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006729 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006730
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03006731 chan = l2cap_get_chan_by_scid(conn, cid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006732 if (!chan) {
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006733 if (cid == L2CAP_CID_A2MP) {
6734 chan = a2mp_channel_create(conn, skb);
6735 if (!chan) {
6736 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006737 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006738 }
6739
6740 l2cap_chan_lock(chan);
6741 } else {
6742 BT_DBG("unknown cid 0x%4.4x", cid);
6743 /* Drop packet and return */
6744 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006745 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006746 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006747 }
6748
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03006749 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006750
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03006751 if (chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006752 goto drop;
6753
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006754 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03006755 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006756 if (l2cap_le_data_rcv(chan, skb) < 0)
6757 goto drop;
6758
6759 goto done;
6760
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006761 case L2CAP_MODE_BASIC:
 6762 /* If the socket receive buffer overflows we drop data here,
 6763 * which is *bad* because L2CAP has to be reliable.
 6764 * But we don't have any other choice: basic mode L2CAP
 6765 * doesn't provide a flow control mechanism. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006766
Szymon Janc2c96e032014-02-18 20:48:34 +01006767 if (chan->imtu < skb->len) {
6768 BT_ERR("Dropping L2CAP data: receive buffer overflow");
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006769 goto drop;
Szymon Janc2c96e032014-02-18 20:48:34 +01006770 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006771
Gustavo Padovan80b98022012-05-27 22:27:51 -03006772 if (!chan->ops->recv(chan, skb))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006773 goto done;
6774 break;
6775
6776 case L2CAP_MODE_ERTM:
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006777 case L2CAP_MODE_STREAMING:
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006778 l2cap_data_rcv(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006779 goto done;
6780
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006781 default:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006782 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006783 break;
6784 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006785
6786drop:
6787 kfree_skb(skb);
6788
6789done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02006790 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006791}
6792
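/* Connectionless reception: look up a PSM listener bound to this ACL
 * link, stash the remote address and PSM for msg_name and pass the
 * payload up; anything that does not fit is silently dropped.
 */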
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006793static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6794 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006795{
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006796 struct hci_conn *hcon = conn->hcon;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006797 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006798
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006799 if (hcon->type != ACL_LINK)
Johan Hedberga24cce12014-08-07 22:56:42 +03006800 goto free_skb;
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006801
Johan Hedbergbf20fd42013-05-14 13:23:13 +03006802 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6803 ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006804 if (!chan)
Johan Hedberga24cce12014-08-07 22:56:42 +03006805 goto free_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006806
Andrei Emeltchenko5b4ceda2012-02-24 16:35:32 +02006807 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006808
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03006809 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006810 goto drop;
6811
Vinicius Costa Gomese13e21d2011-06-17 22:46:27 -03006812 if (chan->imtu < skb->len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006813 goto drop;
6814
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006815 /* Store remote BD_ADDR and PSM for msg_name */
Marcel Holtmann06ae3312013-10-18 03:43:00 -07006816 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006817 bt_cb(skb)->psm = psm;
6818
Johan Hedberga24cce12014-08-07 22:56:42 +03006819 if (!chan->ops->recv(chan, skb)) {
6820 l2cap_chan_put(chan);
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006821 return;
Johan Hedberga24cce12014-08-07 22:56:42 +03006822 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006823
6824drop:
Johan Hedberga24cce12014-08-07 22:56:42 +03006825 l2cap_chan_put(chan);
6826free_skb:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006827 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006828}
6829
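/* Demultiplex a complete L2CAP frame: while the HCI connection is still
 * being set up the skb is queued for later, otherwise it is routed by
 * CID to the BR/EDR signalling channel, the connectionless channel, the
 * LE signalling channel or a data channel.
 */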
6830static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6831{
6832 struct l2cap_hdr *lh = (void *) skb->data;
Johan Hedberg61a939c2014-01-17 20:45:11 +02006833 struct hci_conn *hcon = conn->hcon;
Al Viro8e036fc2007-07-29 00:16:36 -07006834 u16 cid, len;
6835 __le16 psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006836
Johan Hedberg61a939c2014-01-17 20:45:11 +02006837 if (hcon->state != BT_CONNECTED) {
6838 BT_DBG("queueing pending rx skb");
6839 skb_queue_tail(&conn->pending_rx, skb);
6840 return;
6841 }
6842
Linus Torvalds1da177e2005-04-16 15:20:36 -07006843 skb_pull(skb, L2CAP_HDR_SIZE);
6844 cid = __le16_to_cpu(lh->cid);
6845 len = __le16_to_cpu(lh->len);
6846
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006847 if (len != skb->len) {
6848 kfree_skb(skb);
6849 return;
6850 }
6851
Johan Hedberg9e1d7e12014-07-06 11:03:36 +03006852 /* Since we can't actively block incoming LE connections we must
6853 * at least ensure that we ignore incoming data from them.
6854 */
6855 if (hcon->type == LE_LINK &&
Johan Hedbergdcc36c12014-07-09 12:59:13 +03006856 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6857 bdaddr_type(hcon, hcon->dst_type))) {
Johan Hedberge4931502014-07-02 09:36:21 +03006858 kfree_skb(skb);
6859 return;
6860 }
6861
Linus Torvalds1da177e2005-04-16 15:20:36 -07006862 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6863
6864 switch (cid) {
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03006865 case L2CAP_CID_SIGNALING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006866 l2cap_sig_channel(conn, skb);
6867 break;
6868
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03006869 case L2CAP_CID_CONN_LESS:
Andrei Emeltchenko097db762012-03-09 14:16:17 +02006870 psm = get_unaligned((__le16 *) skb->data);
Andrei Emeltchenko0181a702012-05-29 10:04:05 +03006871 skb_pull(skb, L2CAP_PSMLEN_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006872 l2cap_conless_channel(conn, psm, skb);
6873 break;
6874
Marcel Holtmanna2877622013-10-02 23:46:54 -07006875 case L2CAP_CID_LE_SIGNALING:
6876 l2cap_le_sig_channel(conn, skb);
6877 break;
6878
Linus Torvalds1da177e2005-04-16 15:20:36 -07006879 default:
6880 l2cap_data_channel(conn, cid, skb);
6881 break;
6882 }
6883}
6884
Johan Hedberg61a939c2014-01-17 20:45:11 +02006885static void process_pending_rx(struct work_struct *work)
6886{
6887 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6888 pending_rx_work);
6889 struct sk_buff *skb;
6890
6891 BT_DBG("");
6892
6893 while ((skb = skb_dequeue(&conn->pending_rx)))
6894 l2cap_recv_frame(conn, skb);
6895}
6896
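/* Allocate and initialise the per-ACL l2cap_conn structure (or return
 * the existing one), wiring it to a new hci_chan, picking the MTU from
 * the controller (le_mtu for LE links, acl_mtu otherwise) and setting
 * up the locks, lists and deferred work used by the rx and signalling
 * paths.
 */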
Johan Hedberg162b49e2014-01-17 20:45:10 +02006897static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6898{
6899 struct l2cap_conn *conn = hcon->l2cap_data;
6900 struct hci_chan *hchan;
6901
6902 if (conn)
6903 return conn;
6904
6905 hchan = hci_chan_create(hcon);
6906 if (!hchan)
6907 return NULL;
6908
Johan Hedberg27f70f32014-07-21 10:50:06 +03006909 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006910 if (!conn) {
6911 hci_chan_del(hchan);
6912 return NULL;
6913 }
6914
6915 kref_init(&conn->ref);
6916 hcon->l2cap_data = conn;
Johan Hedberg51bb84572014-08-15 21:06:57 +03006917 conn->hcon = hci_conn_get(hcon);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006918 conn->hchan = hchan;
6919
6920 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6921
6922 switch (hcon->type) {
6923 case LE_LINK:
6924 if (hcon->hdev->le_mtu) {
6925 conn->mtu = hcon->hdev->le_mtu;
6926 break;
6927 }
6928 /* fall through */
6929 default:
6930 conn->mtu = hcon->hdev->acl_mtu;
6931 break;
6932 }
6933
6934 conn->feat_mask = 0;
6935
6936 if (hcon->type == ACL_LINK)
6937 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6938 &hcon->hdev->dev_flags);
6939
Marcel Holtmann5a54e7c2014-07-13 20:50:15 +02006940 mutex_init(&conn->ident_lock);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006941 mutex_init(&conn->chan_lock);
6942
6943 INIT_LIST_HEAD(&conn->chan_l);
6944 INIT_LIST_HEAD(&conn->users);
6945
Johan Hedberg276d8072014-08-11 22:06:41 +03006946 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006947
Johan Hedberg61a939c2014-01-17 20:45:11 +02006948 skb_queue_head_init(&conn->pending_rx);
6949 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
Johan Hedbergf3d82d02014-09-05 22:19:50 +03006950 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
Johan Hedberg61a939c2014-01-17 20:45:11 +02006951
Johan Hedberg162b49e2014-01-17 20:45:10 +02006952 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
6953
6954 return conn;
6955}
6956
 6957static bool is_valid_psm(u16 psm, u8 dst_type)
{
6958 if (!psm)
6959 return false;
6960
6961 if (bdaddr_type_is_le(dst_type))
6962 return (psm <= 0x00ff);
6963
6964 /* PSM must be odd and lsb of upper byte must be 0 */
6965 return ((psm & 0x0101) == 0x0001);
6966}
6967
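/* Create an outgoing connection for a channel towards dst/dst_type,
 * either by PSM (connection-oriented channels) or by fixed CID. The
 * function resolves an HCI route, creates the ACL or LE link if needed,
 * adds the channel to the resulting l2cap_conn and either starts the
 * L2CAP connect procedure immediately or leaves it to the connect
 * complete callback. A minimal usage sketch from a hypothetical caller
 * that has already allocated and bound a channel:
 *
 *	err = l2cap_chan_connect(chan, cpu_to_le16(psm), 0, dst, dst_type);
 *	if (err)
 *		goto fail;
 */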
6968int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6969 bdaddr_t *dst, u8 dst_type)
6970{
6971 struct l2cap_conn *conn;
6972 struct hci_conn *hcon;
6973 struct hci_dev *hdev;
Johan Hedberg162b49e2014-01-17 20:45:10 +02006974 int err;
6975
6976 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
6977 dst_type, __le16_to_cpu(psm));
6978
6979 hdev = hci_get_route(dst, &chan->src);
6980 if (!hdev)
6981 return -EHOSTUNREACH;
6982
6983 hci_dev_lock(hdev);
6984
Johan Hedberg162b49e2014-01-17 20:45:10 +02006985 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
6986 chan->chan_type != L2CAP_CHAN_RAW) {
6987 err = -EINVAL;
6988 goto done;
6989 }
6990
Johan Hedberg21626e62014-01-24 10:35:41 +02006991 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
6992 err = -EINVAL;
6993 goto done;
6994 }
6995
6996 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
Johan Hedberg162b49e2014-01-17 20:45:10 +02006997 err = -EINVAL;
6998 goto done;
6999 }
7000
7001 switch (chan->mode) {
7002 case L2CAP_MODE_BASIC:
7003 break;
7004 case L2CAP_MODE_LE_FLOWCTL:
7005 l2cap_le_flowctl_init(chan);
7006 break;
7007 case L2CAP_MODE_ERTM:
7008 case L2CAP_MODE_STREAMING:
7009 if (!disable_ertm)
7010 break;
7011 /* fall through */
7012 default:
Johan Hedbergbeb19e42014-07-18 11:15:26 +03007013 err = -EOPNOTSUPP;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007014 goto done;
7015 }
7016
7017 switch (chan->state) {
7018 case BT_CONNECT:
7019 case BT_CONNECT2:
7020 case BT_CONFIG:
7021 /* Already connecting */
7022 err = 0;
7023 goto done;
7024
7025 case BT_CONNECTED:
7026 /* Already connected */
7027 err = -EISCONN;
7028 goto done;
7029
7030 case BT_OPEN:
7031 case BT_BOUND:
7032 /* Can connect */
7033 break;
7034
7035 default:
7036 err = -EBADFD;
7037 goto done;
7038 }
7039
7040 /* Set destination address and psm */
7041 bacpy(&chan->dst, dst);
7042 chan->dst_type = dst_type;
7043
7044 chan->psm = psm;
7045 chan->dcid = cid;
7046
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007047 if (bdaddr_type_is_le(dst_type)) {
Johan Hedberge804d252014-07-16 11:42:28 +03007048 u8 role;
Johan Hedbergcdd62752014-07-07 15:02:28 +03007049
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007050 /* Convert from L2CAP channel address type to HCI address type
7051 */
7052 if (dst_type == BDADDR_LE_PUBLIC)
7053 dst_type = ADDR_LE_DEV_PUBLIC;
7054 else
7055 dst_type = ADDR_LE_DEV_RANDOM;
7056
Johan Hedberge804d252014-07-16 11:42:28 +03007057 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7058 role = HCI_ROLE_SLAVE;
7059 else
7060 role = HCI_ROLE_MASTER;
Johan Hedbergcdd62752014-07-07 15:02:28 +03007061
Andre Guedes04a6c582014-02-26 20:21:44 -03007062 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
Johan Hedberge804d252014-07-16 11:42:28 +03007063 HCI_LE_CONN_TIMEOUT, role);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007064 } else {
Johan Hedbergd93375a2014-07-07 15:02:27 +03007065 u8 auth_type = l2cap_get_auth_type(chan);
Andre Guedes04a6c582014-02-26 20:21:44 -03007066 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007067 }
Johan Hedberg162b49e2014-01-17 20:45:10 +02007068
7069 if (IS_ERR(hcon)) {
7070 err = PTR_ERR(hcon);
7071 goto done;
7072 }
7073
7074 conn = l2cap_conn_add(hcon);
7075 if (!conn) {
7076 hci_conn_drop(hcon);
7077 err = -ENOMEM;
7078 goto done;
7079 }
7080
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007081 mutex_lock(&conn->chan_lock);
7082 l2cap_chan_lock(chan);
7083
Johan Hedberg162b49e2014-01-17 20:45:10 +02007084 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7085 hci_conn_drop(hcon);
7086 err = -EBUSY;
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007087 goto chan_unlock;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007088 }
7089
7090 /* Update source addr of the socket */
7091 bacpy(&chan->src, &hcon->src);
7092 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7093
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007094 __l2cap_chan_add(conn, chan);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007095
7096 /* l2cap_chan_add takes its own ref so we can drop this one */
7097 hci_conn_drop(hcon);
7098
7099 l2cap_state_change(chan, BT_CONNECT);
7100 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7101
Johan Hedberg61202e42014-01-28 15:16:48 -08007102 /* Release chan->sport so that it can be reused by other
7103 * sockets (as it's only used for listening sockets).
7104 */
7105 write_lock(&chan_list_lock);
7106 chan->sport = 0;
7107 write_unlock(&chan_list_lock);
7108
Johan Hedberg162b49e2014-01-17 20:45:10 +02007109 if (hcon->state == BT_CONNECTED) {
7110 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7111 __clear_chan_timer(chan);
Johan Hedberge7cafc42014-07-17 15:35:38 +03007112 if (l2cap_chan_check_security(chan, true))
Johan Hedberg162b49e2014-01-17 20:45:10 +02007113 l2cap_state_change(chan, BT_CONNECTED);
7114 } else
7115 l2cap_do_start(chan);
7116 }
7117
7118 err = 0;
7119
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007120chan_unlock:
Johan Hedberg162b49e2014-01-17 20:45:10 +02007121 l2cap_chan_unlock(chan);
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007122 mutex_unlock(&conn->chan_lock);
7123done:
Johan Hedberg162b49e2014-01-17 20:45:10 +02007124 hci_dev_unlock(hdev);
7125 hci_dev_put(hdev);
7126 return err;
7127}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03007128EXPORT_SYMBOL_GPL(l2cap_chan_connect);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007129
Linus Torvalds1da177e2005-04-16 15:20:36 -07007130/* ---- L2CAP interface with lower layer (HCI) ---- */
7131
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007132int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007133{
7134 int exact = 0, lm1 = 0, lm2 = 0;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007135 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007136
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007137 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007138
7139 /* Find listening sockets and check their link_mode */
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007140 read_lock(&chan_list_lock);
7141 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007142 if (c->state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007143 continue;
7144
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007145 if (!bacmp(&c->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007146 lm1 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007147 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007148 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149 exact++;
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007150 } else if (!bacmp(&c->src, BDADDR_ANY)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007151 lm2 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007152 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007153 lm2 |= HCI_LM_MASTER;
7154 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007155 }
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007156 read_unlock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007157
7158 return exact ? lm1 : lm2;
7159}
7160
Johan Hedberge760ec12014-08-07 22:56:47 +03007161/* Find the next fixed channel in BT_LISTEN state, continue iteration
7162 * from an existing channel in the list or from the beginning of the
7163 * global list (by passing NULL as first parameter).
7164 */
7165static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
Johan Hedberg54a1b622014-08-07 22:56:48 +03007166 bdaddr_t *src, u8 link_type)
Johan Hedberge760ec12014-08-07 22:56:47 +03007167{
7168 read_lock(&chan_list_lock);
7169
7170 if (c)
7171 c = list_next_entry(c, global_l);
7172 else
7173 c = list_entry(chan_list.next, typeof(*c), global_l);
7174
7175 list_for_each_entry_from(c, &chan_list, global_l) {
7176 if (c->chan_type != L2CAP_CHAN_FIXED)
7177 continue;
7178 if (c->state != BT_LISTEN)
7179 continue;
7180 if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
7181 continue;
Johan Hedberg54a1b622014-08-07 22:56:48 +03007182 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7183 continue;
7184 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7185 continue;
Johan Hedberge760ec12014-08-07 22:56:47 +03007186
7187 l2cap_chan_hold(c);
7188 read_unlock(&chan_list_lock);
7189 return c;
7190 }
7191
7192 read_unlock(&chan_list_lock);
7193
7194 return NULL;
7195}
7196
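/* HCI connect-complete callback: on success create (or reuse) the L2CAP
 * connection for the new link, attach any fixed channels listening for
 * this address and link type, and kick off channel setup; on failure
 * tear the connection down with the mapped errno.
 */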
Andrei Emeltchenko9e664632012-07-24 16:06:15 +03007197void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007198{
Johan Hedberge760ec12014-08-07 22:56:47 +03007199 struct hci_dev *hdev = hcon->hdev;
Marcel Holtmann01394182006-07-03 10:02:46 +02007200 struct l2cap_conn *conn;
Johan Hedberge760ec12014-08-07 22:56:47 +03007201 struct l2cap_chan *pchan;
7202 u8 dst_type;
Marcel Holtmann01394182006-07-03 10:02:46 +02007203
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007204 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007205
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007206 if (status) {
Joe Perchese1750722011-06-29 18:18:29 -07007207 l2cap_conn_del(hcon, bt_to_errno(status));
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007208 return;
Andrei Emeltchenkoba6fc312012-10-31 15:46:26 +02007209 }
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007210
7211 conn = l2cap_conn_add(hcon);
7212 if (!conn)
7213 return;
7214
Johan Hedberge760ec12014-08-07 22:56:47 +03007215 dst_type = bdaddr_type(hcon, hcon->dst_type);
7216
7217 /* If device is blocked, do not create channels for it */
7218 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7219 return;
7220
7221 /* Find fixed channels and notify them of the new connection. We
7222 * use multiple individual lookups, continuing each time where
7223 * we left off, because the list lock would prevent calling the
7224 * potentially sleeping l2cap_chan_lock() function.
7225 */
Johan Hedberg54a1b622014-08-07 22:56:48 +03007226 pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
Johan Hedberge760ec12014-08-07 22:56:47 +03007227 while (pchan) {
7228 struct l2cap_chan *chan, *next;
7229
7230 /* Client fixed channels should override server ones */
7231 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7232 goto next;
7233
7234 l2cap_chan_lock(pchan);
7235 chan = pchan->ops->new_connection(pchan);
7236 if (chan) {
7237 bacpy(&chan->src, &hcon->src);
7238 bacpy(&chan->dst, &hcon->dst);
7239 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7240 chan->dst_type = dst_type;
7241
7242 __l2cap_chan_add(conn, chan);
7243 }
7244
7245 l2cap_chan_unlock(pchan);
7246next:
Johan Hedberg54a1b622014-08-07 22:56:48 +03007247 next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
7248 hcon->type);
Johan Hedberge760ec12014-08-07 22:56:47 +03007249 l2cap_chan_put(pchan);
7250 pchan = next;
7251 }
7252
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007253 l2cap_conn_ready(conn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007254}
7255
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007256int l2cap_disconn_ind(struct hci_conn *hcon)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007257{
7258 struct l2cap_conn *conn = hcon->l2cap_data;
7259
7260 BT_DBG("hcon %p", hcon);
7261
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007262 if (!conn)
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02007263 return HCI_ERROR_REMOTE_USER_TERM;
Marcel Holtmann2950f212009-02-12 14:02:50 +01007264 return conn->disc_reason;
7265}
7266
Andrei Emeltchenko9e664632012-07-24 16:06:15 +03007267void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007268{
7269 BT_DBG("hcon %p reason %d", hcon, reason);
7270
Joe Perchese1750722011-06-29 18:18:29 -07007271 l2cap_conn_del(hcon, bt_to_errno(reason));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007272}
7273
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007274static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007275{
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03007276 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007277 return;
7278
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007279 if (encrypt == 0x00) {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007280 if (chan->sec_level == BT_SECURITY_MEDIUM) {
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007281 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
Marcel Holtmann7d513e92014-01-15 22:37:40 -08007282 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7283 chan->sec_level == BT_SECURITY_FIPS)
Gustavo F. Padovan0f852722011-05-04 19:42:50 -03007284 l2cap_chan_close(chan, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007285 } else {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007286 if (chan->sec_level == BT_SECURITY_MEDIUM)
Gustavo F. Padovanc9b66672011-05-17 14:59:01 -03007287 __clear_chan_timer(chan);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007288 }
7289}
7290
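/* HCI security (authentication/encryption) change callback: walk every
 * channel on the connection and either resume data, continue an ongoing
 * connect/configuration exchange, or schedule a disconnect, depending
 * on the channel state and whether the security upgrade succeeded.
 */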
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007291int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007292{
Marcel Holtmann40be4922008-07-14 20:13:50 +02007293 struct l2cap_conn *conn = hcon->l2cap_data;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03007294 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007295
Marcel Holtmann01394182006-07-03 10:02:46 +02007296 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007297 return 0;
Marcel Holtmann01394182006-07-03 10:02:46 +02007298
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007299 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007300
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007301 mutex_lock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007302
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007303 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007304 l2cap_chan_lock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007305
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007306 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7307 state_to_string(chan->state));
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007308
Johan Hedberg2338a7e2014-01-24 10:35:40 +02007309 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko78eb2f92012-07-19 17:03:47 +03007310 l2cap_chan_unlock(chan);
7311 continue;
7312 }
7313
Johan Hedberg191eb392014-08-07 22:56:45 +03007314 if (!status && encrypt)
7315 chan->sec_level = hcon->sec_level;
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007316
Andrei Emeltchenko96eff462012-11-15 18:14:53 +02007317 if (!__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007318 l2cap_chan_unlock(chan);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01007319 continue;
7320 }
7321
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007322 if (!status && (chan->state == BT_CONNECTED ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01007323 chan->state == BT_CONFIG)) {
Marcel Holtmannd97c8992013-10-14 02:53:54 -07007324 chan->ops->resume(chan);
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007325 l2cap_check_encryption(chan, encrypt);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007326 l2cap_chan_unlock(chan);
Marcel Holtmann9719f8a2008-07-14 20:13:45 +02007327 continue;
7328 }
7329
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007330 if (chan->state == BT_CONNECT) {
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007331 if (!status)
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03007332 l2cap_start_connection(chan);
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007333 else
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007334 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergfa37c1a2014-11-13 10:55:17 +02007335 } else if (chan->state == BT_CONNECT2 &&
7336 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007337 struct l2cap_conn_rsp rsp;
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007338 __u16 res, stat;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007339
7340 if (!status) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07007341 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007342 res = L2CAP_CR_PEND;
7343 stat = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08007344 chan->ops->defer(chan);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007345 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007346 l2cap_state_change(chan, BT_CONFIG);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007347 res = L2CAP_CR_SUCCESS;
7348 stat = L2CAP_CS_NO_INFO;
7349 }
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007350 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007351 l2cap_state_change(chan, BT_DISCONN);
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007352 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007353 res = L2CAP_CR_SEC_BLOCK;
7354 stat = L2CAP_CS_NO_INFO;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007355 }
7356
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03007357 rsp.scid = cpu_to_le16(chan->dcid);
7358 rsp.dcid = cpu_to_le16(chan->scid);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007359 rsp.result = cpu_to_le16(res);
7360 rsp.status = cpu_to_le16(stat);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03007361 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01007362 sizeof(rsp), &rsp);
Mat Martineau2d369352012-05-23 14:59:30 -07007363
7364 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7365 res == L2CAP_CR_SUCCESS) {
7366 char buf[128];
7367 set_bit(CONF_REQ_SENT, &chan->conf_state);
7368 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7369 L2CAP_CONF_REQ,
7370 l2cap_build_conf_req(chan, buf),
7371 buf);
7372 chan->num_conf_req++;
7373 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007374 }
7375
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007376 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007377 }
7378
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007379 mutex_unlock(&conn->chan_lock);
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007380
Linus Torvalds1da177e2005-04-16 15:20:36 -07007381 return 0;
7382}
7383
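/* Reassemble L2CAP frames from incoming ACL fragments: ACL_START
 * carries the basic L2CAP header and opens a new reassembly buffer
 * sized from the announced length, ACL_CONT appends to it, and once the
 * frame is complete it is handed to l2cap_recv_frame(). Inconsistent
 * lengths mark the connection unreliable and drop the fragment.
 */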
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007384int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007385{
7386 struct l2cap_conn *conn = hcon->l2cap_data;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007387 struct l2cap_hdr *hdr;
7388 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007389
Andrei Emeltchenko1d13a252012-10-15 11:58:41 +03007390 /* For an AMP controller do not create an l2cap conn */
7391 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7392 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007393
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007394 if (!conn)
Claudio Takahasibaf43252013-04-11 13:55:50 -03007395 conn = l2cap_conn_add(hcon);
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007396
7397 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007398 goto drop;
7399
7400 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7401
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007402 switch (flags) {
7403 case ACL_START:
7404 case ACL_START_NO_FLUSH:
7405 case ACL_COMPLETE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007406 if (conn->rx_len) {
7407 BT_ERR("Unexpected start frame (len %d)", skb->len);
7408 kfree_skb(conn->rx_skb);
7409 conn->rx_skb = NULL;
7410 conn->rx_len = 0;
7411 l2cap_conn_unreliable(conn, ECOMM);
7412 }
7413
Andrei Emeltchenkoaae7fe22010-09-15 14:28:43 +03007414 /* A start fragment always begins with the Basic L2CAP header */
7415 if (skb->len < L2CAP_HDR_SIZE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007416 BT_ERR("Frame is too short (len %d)", skb->len);
7417 l2cap_conn_unreliable(conn, ECOMM);
7418 goto drop;
7419 }
7420
7421 hdr = (struct l2cap_hdr *) skb->data;
7422 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7423
7424 if (len == skb->len) {
7425 /* Complete frame received */
7426 l2cap_recv_frame(conn, skb);
7427 return 0;
7428 }
7429
7430 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7431
7432 if (skb->len > len) {
7433 BT_ERR("Frame is too long (len %d, expected len %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007434 skb->len, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007435 l2cap_conn_unreliable(conn, ECOMM);
7436 goto drop;
7437 }
7438
7439 /* Allocate skb for the complete frame (with header) */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03007440 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03007441 if (!conn->rx_skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007442 goto drop;
7443
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007444 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007445 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007446 conn->rx_len = len - skb->len;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007447 break;
7448
7449 case ACL_CONT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007450 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7451
7452 if (!conn->rx_len) {
7453 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7454 l2cap_conn_unreliable(conn, ECOMM);
7455 goto drop;
7456 }
7457
7458 if (skb->len > conn->rx_len) {
7459 BT_ERR("Fragment is too long (len %d, expected %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007460 skb->len, conn->rx_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007461 kfree_skb(conn->rx_skb);
7462 conn->rx_skb = NULL;
7463 conn->rx_len = 0;
7464 l2cap_conn_unreliable(conn, ECOMM);
7465 goto drop;
7466 }
7467
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007468 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007469 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007470 conn->rx_len -= skb->len;
7471
7472 if (!conn->rx_len) {
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007473 /* Complete frame received. l2cap_recv_frame
 7474 * takes ownership of the skb, so set the connection's
 7475 * rx_skb pointer to NULL first.
7476 */
7477 struct sk_buff *rx_skb = conn->rx_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007478 conn->rx_skb = NULL;
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007479 l2cap_recv_frame(conn, rx_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007480 }
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007481 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007482 }
7483
7484drop:
7485 kfree_skb(skb);
7486 return 0;
7487}
7488
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007489static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007490{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007491 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007492
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007493 read_lock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007494
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007495 list_for_each_entry(c, &chan_list, global_l) {
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007496 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007497 &c->src, &c->dst,
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007498 c->state, __le16_to_cpu(c->psm),
7499 c->scid, c->dcid, c->imtu, c->omtu,
7500 c->sec_level, c->mode);
Andrei Emeltchenko61e1b4b2012-01-19 11:19:50 +02007501 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007502
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007503 read_unlock(&chan_list_lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007504
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007505 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007506}
7507
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007508static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7509{
7510 return single_open(file, l2cap_debugfs_show, inode->i_private);
7511}
7512
7513static const struct file_operations l2cap_debugfs_fops = {
7514 .open = l2cap_debugfs_open,
7515 .read = seq_read,
7516 .llseek = seq_lseek,
7517 .release = single_release,
7518};
7519
7520static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007521
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007522int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007523{
7524 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007525
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007526 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007527 if (err < 0)
7528 return err;
7529
Marcel Holtmann1120e4b2013-10-17 17:24:16 -07007530 if (IS_ERR_OR_NULL(bt_debugfs))
7531 return 0;
7532
7533 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7534 NULL, &l2cap_debugfs_fops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007535
Samuel Ortiz40b93972014-05-14 17:53:35 +02007536 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007537 &le_max_credits);
Samuel Ortiz40b93972014-05-14 17:53:35 +02007538 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007539 &le_default_mps);
7540
Linus Torvalds1da177e2005-04-16 15:20:36 -07007541 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542}
7543
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007544void l2cap_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007545{
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007546 debugfs_remove(l2cap_debugfs);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007547 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007548}
7549
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03007550module_param(disable_ertm, bool, 0644);
7551MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");