/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum. All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"
#include "amp.h"

#define LE_FLOWCTL_MAX_CREDITS 65535

bool disable_ertm;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
		     struct sk_buff_head *skbs, u8 event);

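/* Translate an HCI address type into the bdaddr type exposed to user
 * space: LE links map to BDADDR_LE_PUBLIC/BDADDR_LE_RANDOM, everything
 * else is reported as BDADDR_BREDR.
 */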
static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
{
	if (hcon->type == LE_LINK) {
		if (type == ADDR_LE_DEV_PUBLIC)
			return BDADDR_LE_PUBLIC;
		else
			return BDADDR_LE_RANDOM;
	}

	return BDADDR_BREDR;
}

/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->dcid == cid)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						   u16 cid)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->scid == cid)
			return c;
	}
	return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_scid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
						 u16 cid)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_dcid(conn, cid);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						    u8 ident)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &conn->chan_l, list) {
		if (c->ident == ident)
			return c;
	}
	return NULL;
}

static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_chan *c;

	mutex_lock(&conn->chan_lock);
	c = __l2cap_get_chan_by_ident(conn, ident);
	if (c)
		l2cap_chan_lock(c);
	mutex_unlock(&conn->chan_lock);

	return c;
}

static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
	struct l2cap_chan *c;

	list_for_each_entry(c, &chan_list, global_l) {
		if (c->sport == psm && !bacmp(&c->src, src))
			return c;
	}
	return NULL;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}

static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
	u16 cid, dyn_end;

	if (conn->hcon->type == LE_LINK)
		dyn_end = L2CAP_CID_LE_DYN_END;
	else
		dyn_end = L2CAP_CID_DYN_END;

	for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
		if (!__l2cap_get_chan_by_scid(conn, cid))
			return cid;
	}

	return 0;
}

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}

static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}

static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */

static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
	size_t alloc_size, i;

	/* Allocated size is a power of 2 to map sequence numbers
	 * (which may be up to 14 bits) in to a smaller array that is
	 * sized for the negotiated ERTM transmit windows.
	 */
	alloc_size = roundup_pow_of_two(size);

	seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
	if (!seq_list->list)
		return -ENOMEM;

	seq_list->mask = alloc_size - 1;
	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	for (i = 0; i < alloc_size; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

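/* Remove and return the sequence number at the head of the list.  The
 * caller must ensure the list is non-empty; when the last entry is
 * popped, head and tail are reset to L2CAP_SEQ_LIST_CLEAR.
 */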
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}

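/* Channel timeout handler, run from the chan_timer delayed work.
 * Closes the channel (with ECONNREFUSED or ETIMEDOUT depending on its
 * current state) and drops the timer's channel reference.
 */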
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}

struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);

static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}

void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}

void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);

void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);

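/* Reset the LE credit based flow control state: clear any partially
 * reassembled SDU, start with no TX credits and advertise the module
 * default number of RX credits.  The MPS is capped by the channel MTU.
 */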
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}

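/* Attach a channel to a connection.  The source CID depends on the
 * channel type: dynamically allocated for connection-oriented channels,
 * the well-known connectionless/signalling CIDs for connectionless and
 * raw channels, and left to the caller for fixed channels.  Callers
 * must hold conn->chan_lock; l2cap_chan_add() below takes it for them.
 */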
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}

void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}

void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);

static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}

static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}

static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}

void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);

static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}

/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}

static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 *    1 - 128 are used by kernel.
	 *  129 - 199 are reserved.
	 *  200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}

static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}

static bool __chan_is_moving(struct l2cap_chan *chan)
{
	return chan->move_state != L2CAP_MOVE_STABLE &&
	       chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}

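/* Hand a finished PDU to the HCI layer.  Frames for a channel that has
 * been moved to an AMP controller (and is not in the middle of a move)
 * go out on the high-speed hci_chan; everything else is sent on the
 * ACL link, with the flush and force-active flags derived from the
 * channel flags and controller capabilities.
 */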
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			kfree_skb(skb);

		return;
	}

	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	    lmp_no_flush_capable(hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}

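/* Helpers for converting between the on-air ERTM control field (the
 * 16-bit enhanced format or the 32-bit extended format, selected by
 * FLAG_EXT_CTRL) and the unpacked struct l2cap_ctrl kept in the skb
 * control block.
 */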
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
	control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

	if (enh & L2CAP_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
		control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
	control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-Frame */
		control->sframe = 1;
		control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-Frame */
		control->sframe = 0;
		control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}

static inline void __unpack_control(struct l2cap_chan *chan,
				    struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		__unpack_extended_control(get_unaligned_le32(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
	} else {
		__unpack_enhanced_control(get_unaligned_le16(skb->data),
					  &bt_cb(skb)->control);
		skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
	}
}

static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
	u32 packed;

	packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
		packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
	u16 packed;

	packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
	packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

	if (control->sframe) {
		packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
		packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
		packed |= L2CAP_CTRL_FRAME_TYPE;
	} else {
		packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
		packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
	}

	return packed;
}

static inline void __pack_control(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control,
				  struct sk_buff *skb)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
		put_unaligned_le32(__pack_extended_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	} else {
		put_unaligned_le16(__pack_enhanced_control(control),
				   skb->data + L2CAP_HDR_SIZE);
	}
}

static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		return L2CAP_EXT_HDR_SIZE;
	else
		return L2CAP_ENH_HDR_SIZE;
}

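/* Build and transmit ERTM S-frames (RR/RNR/REJ/SREJ).  The PDU carries
 * only the L2CAP header, the packed control field and, when CRC16 FCS
 * is enabled, the checksum; sending any S-frame other than SREJ also
 * acknowledges reqseq and stops the pending ack timer.
 */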
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}

static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}

static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}

static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
		return true;

	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}

static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!conn->hs_enabled)
		return false;

	if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
		return false;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}

static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}

void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}

static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}

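/* Prepare an ERTM channel for a move to another controller: stop the
 * ERTM timers, reset the retry counters of frames still sitting in the
 * tx queue, clear the SREJ/retransmit bookkeeping and switch the
 * receive state machine into its move state.
 */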
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->control.retries)
			bt_cb(skb)->control.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}

static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;

	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}

static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}

Johan Hedbergf1496de2013-05-13 14:15:56 +03001242static void l2cap_le_connect(struct l2cap_chan *chan)
1243{
1244 struct l2cap_conn *conn = chan->conn;
1245 struct l2cap_le_conn_req req;
1246
Johan Hedberg595177f2013-12-02 22:12:22 +02001247 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1248 return;
1249
Johan Hedbergf1496de2013-05-13 14:15:56 +03001250 req.psm = chan->psm;
1251 req.scid = cpu_to_le16(chan->scid);
1252 req.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02001253 req.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03001254 req.credits = cpu_to_le16(chan->rx_credits);
Johan Hedbergf1496de2013-05-13 14:15:56 +03001255
1256 chan->ident = l2cap_get_ident(conn);
1257
1258 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1259 sizeof(req), &req);
1260}
1261
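/* Start an LE channel once the required security level is in place:
 * channels without a PSM become ready immediately, while connection
 * oriented channels in BT_CONNECT send the LE connect request.
 */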
1262static void l2cap_le_start(struct l2cap_chan *chan)
1263{
1264 struct l2cap_conn *conn = chan->conn;
1265
1266 if (!smp_conn_security(conn->hcon, chan->sec_level))
1267 return;
1268
1269 if (!chan->psm) {
1270 l2cap_chan_ready(chan);
1271 return;
1272 }
1273
1274 if (chan->state == BT_CONNECT)
1275 l2cap_le_connect(chan);
1276}
1277
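/* Kick off channel establishment on the appropriate transport: AMP
 * discovery for AMP-capable channels, the LE connect path on LE links,
 * or a plain L2CAP Connection request on BR/EDR.
 */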
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001278static void l2cap_start_connection(struct l2cap_chan *chan)
1279{
1280 if (__amp_capable(chan)) {
1281 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1282 a2mp_discover_amp(chan);
Johan Hedbergf1496de2013-05-13 14:15:56 +03001283 } else if (chan->conn->hcon->type == LE_LINK) {
1284 l2cap_le_start(chan);
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001285 } else {
1286 l2cap_send_conn_req(chan);
1287 }
1288}
1289
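/* Request the remote feature mask once per connection and arm the info
 * timer so that channel setup can continue even if the peer never
 * replies.
 */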
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001290static void l2cap_request_info(struct l2cap_conn *conn)
1291{
1292 struct l2cap_info_req req;
1293
1294 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1295 return;
1296
1297 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1298
1299 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1300 conn->info_ident = l2cap_get_ident(conn);
1301
1302 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1303
1304 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1305 sizeof(req), &req);
1306}
1307
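/* Drive connection setup for a single channel. LE links defer to
 * l2cap_le_start(); on BR/EDR the remote feature mask must be known
 * (requesting it first if necessary) and the security check must pass
 * before the connect request is sent.
 */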
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001308static void l2cap_do_start(struct l2cap_chan *chan)
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001309{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03001310 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001311
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001312 if (conn->hcon->type == LE_LINK) {
Johan Hedberg96ac34f2013-05-13 11:15:07 +03001313 l2cap_le_start(chan);
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001314 return;
1315 }
1316
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001317 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1318 l2cap_request_info(conn);
1319 return;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001320 }
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001321
1322 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1323 return;
1324
1325 if (l2cap_chan_check_security(chan, true) &&
1326 __l2cap_no_conn_pending(chan))
1327 l2cap_start_connection(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001328}
1329
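/* Return non-zero if the requested mode is supported by both the local
 * feature mask and the remote one.
 */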
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001330static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1331{
1332 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03001333 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001334 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1335
1336 switch (mode) {
1337 case L2CAP_MODE_ERTM:
1338 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1339 case L2CAP_MODE_STREAMING:
1340 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1341 default:
1342 return 0x00;
1343 }
1344}
1345
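/* Tear down a channel: stop the ERTM timers if needed, send an L2CAP
 * Disconnection request (skipped for the A2MP channel) and move the
 * channel to BT_DISCONN with the given error.
 */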
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001346static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001347{
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001348 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001349 struct l2cap_disconn_req req;
1350
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001351 if (!conn)
1352 return;
1353
Andrei Emeltchenkoaad3d0e2012-09-06 15:05:42 +03001354 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001355 __clear_retrans_timer(chan);
1356 __clear_monitor_timer(chan);
1357 __clear_ack_timer(chan);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001358 }
1359
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001360 if (chan->scid == L2CAP_CID_A2MP) {
Gustavo Padovand1177732012-10-06 11:47:38 +01001361 l2cap_state_change(chan, BT_DISCONN);
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001362 return;
1363 }
1364
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001365 req.dcid = cpu_to_le16(chan->dcid);
1366 req.scid = cpu_to_le16(chan->scid);
Gustavo Padovan2d792812012-10-06 10:07:01 +01001367 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1368 sizeof(req), &req);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001369
Gustavo Padovanf8e73012013-10-15 19:24:46 -03001370 l2cap_state_change_and_error(chan, BT_DISCONN, err);
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001371}
1372
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373/* ---- L2CAP connections ---- */
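/* Walk every channel on a connection and move it forward: ready
 * connectionless channels, start security and connect requests for
 * channels in BT_CONNECT, and answer pending incoming connections for
 * channels in BT_CONNECT2.
 */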
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001374static void l2cap_conn_start(struct l2cap_conn *conn)
1375{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001376 struct l2cap_chan *chan, *tmp;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001377
1378 BT_DBG("conn %p", conn);
1379
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001380 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001381
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001382 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001383 l2cap_chan_lock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001384
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03001385 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001386 l2cap_chan_ready(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001387 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001388 continue;
1389 }
1390
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001391 if (chan->state == BT_CONNECT) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03001392 if (!l2cap_chan_check_security(chan, true) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001393 !__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001394 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001395 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02001396 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001397
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001398 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
Gustavo Padovan2d792812012-10-06 10:07:01 +01001399 && test_bit(CONF_STATE2_DEVICE,
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001400 &chan->conf_state)) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001401 l2cap_chan_close(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001402 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001403 continue;
1404 }
1405
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001406 l2cap_start_connection(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001407
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001408 } else if (chan->state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001409 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001410 char buf[128];
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001411 rsp.scid = cpu_to_le16(chan->dcid);
1412 rsp.dcid = cpu_to_le16(chan->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001413
Johan Hedberge7cafc42014-07-17 15:35:38 +03001414 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07001415 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001416 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1417 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08001418 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001419
1420 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02001421 l2cap_state_change(chan, BT_CONFIG);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001422 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1423 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001424 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001425 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001426 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1427 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001428 }
1429
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001430 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001431 sizeof(rsp), &rsp);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001432
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001433 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001434 rsp.result != L2CAP_CR_SUCCESS) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001435 l2cap_chan_unlock(chan);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001436 continue;
1437 }
1438
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001439 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001440 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001441 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03001442 chan->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001443 }
1444
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001445 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001446 }
1447
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001448 mutex_unlock(&conn->chan_lock);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001449}
1450
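/* An LE link is ready: kick off security for outgoing pairing and, in
 * the slave role, request a connection parameter update if the current
 * interval is outside the configured min/max range.
 */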
Ville Tervob62f3282011-02-10 22:38:50 -03001451static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1452{
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001453 struct hci_conn *hcon = conn->hcon;
Johan Hedbergdcc36c12014-07-09 12:59:13 +03001454 struct hci_dev *hdev = hcon->hdev;
Ville Tervob62f3282011-02-10 22:38:50 -03001455
Johan Hedberge760ec12014-08-07 22:56:47 +03001456 BT_DBG("%s conn %p", hdev->name, conn);
Ville Tervob62f3282011-02-10 22:38:50 -03001457
Johan Hedberge760ec12014-08-07 22:56:47 +03001458 /* For outgoing pairing which doesn't necessarily have an
1459 * associated socket (e.g. mgmt_pair_device).
1460 */
1461 if (hcon->out)
1462 smp_conn_security(hcon, hcon->pending_sec_level);
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001463
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001464 /* For LE slave connections, make sure the connection interval
1465 * is in the range of the minium and maximum interval that has
1466 * been configured for this connection. If not, then trigger
1467 * the connection update procedure.
1468 */
Johan Hedberg40bef302014-07-16 11:42:27 +03001469 if (hcon->role == HCI_ROLE_SLAVE &&
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001470 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1471 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1472 struct l2cap_conn_param_update_req req;
1473
1474 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1475 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1476 req.latency = cpu_to_le16(hcon->le_conn_latency);
1477 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1478
1479 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1480 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1481 }
Ville Tervob62f3282011-02-10 22:38:50 -03001482}
1483
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001484static void l2cap_conn_ready(struct l2cap_conn *conn)
1485{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001486 struct l2cap_chan *chan;
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001487 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001488
1489 BT_DBG("conn %p", conn);
1490
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001491 if (hcon->type == ACL_LINK)
1492 l2cap_request_info(conn);
1493
Johan Hedberge760ec12014-08-07 22:56:47 +03001494 mutex_lock(&conn->chan_lock);
1495
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001496 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03001497
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001498 l2cap_chan_lock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001499
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001500 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001501 l2cap_chan_unlock(chan);
1502 continue;
1503 }
1504
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001505 if (hcon->type == LE_LINK) {
Johan Hedbergf1496de2013-05-13 14:15:56 +03001506 l2cap_le_start(chan);
Vinicius Costa Gomes63128452011-06-17 22:46:26 -03001507 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001508 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1509 l2cap_chan_ready(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001510 } else if (chan->state == BT_CONNECT) {
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001511 l2cap_do_start(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001512 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001513
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001514 l2cap_chan_unlock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001515 }
1516
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001517 mutex_unlock(&conn->chan_lock);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001518
Johan Hedberg79a05722014-08-08 09:28:04 +03001519 if (hcon->type == LE_LINK)
1520 l2cap_le_conn_ready(conn);
1521
Johan Hedberg61a939c2014-01-17 20:45:11 +02001522 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001523}
1524
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001525/* Notify sockets that we cannot guarantee reliability anymore */
1526static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1527{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001528 struct l2cap_chan *chan;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001529
1530 BT_DBG("conn %p", conn);
1531
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001532 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001533
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001534 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenkoecf61bd2011-10-11 14:04:32 +03001535 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
Gustavo Padovan1d8b1fd2012-10-06 11:34:52 +01001536 l2cap_chan_set_err(chan, err);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001537 }
1538
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001539 mutex_unlock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001540}
1541
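/* The info request timed out: mark the feature mask exchange as done so
 * pending channels are not stalled forever, then restart connection
 * setup.
 */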
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001542static void l2cap_info_timeout(struct work_struct *work)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001543{
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001544 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001545 info_timer.work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001546
Marcel Holtmann984947d2009-02-06 23:35:19 +01001547 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001548 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001549
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001550 l2cap_conn_start(conn);
1551}
1552
David Herrmann2c8e1412013-04-06 20:28:45 +02001553/*
1554 * l2cap_user
1555 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1556 * callback is called during registration. The ->remove callback is called
1557 * during unregistration.
 1558 * An l2cap_user object can be unregistered explicitly, or implicitly when
 1559 * the underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1560 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1561 * External modules must own a reference to the l2cap_conn object if they intend
1562 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1563 * any time if they don't.
1564 */
1565
1566int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1567{
1568 struct hci_dev *hdev = conn->hcon->hdev;
1569 int ret;
1570
1571 /* We need to check whether l2cap_conn is registered. If it is not, we
1572 * must not register the l2cap_user. l2cap_conn_del() is unregisters
1573 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1574 * relies on the parent hci_conn object to be locked. This itself relies
1575 * on the hci_dev object to be locked. So we must lock the hci device
1576 * here, too. */
1577
1578 hci_dev_lock(hdev);
1579
1580 if (user->list.next || user->list.prev) {
1581 ret = -EINVAL;
1582 goto out_unlock;
1583 }
1584
1585 /* conn->hchan is NULL after l2cap_conn_del() was called */
1586 if (!conn->hchan) {
1587 ret = -ENODEV;
1588 goto out_unlock;
1589 }
1590
1591 ret = user->probe(conn, user);
1592 if (ret)
1593 goto out_unlock;
1594
1595 list_add(&user->list, &conn->users);
1596 ret = 0;
1597
1598out_unlock:
1599 hci_dev_unlock(hdev);
1600 return ret;
1601}
1602EXPORT_SYMBOL(l2cap_register_user);
1603
1604void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1605{
1606 struct hci_dev *hdev = conn->hcon->hdev;
1607
1608 hci_dev_lock(hdev);
1609
1610 if (!user->list.next || !user->list.prev)
1611 goto out_unlock;
1612
1613 list_del(&user->list);
1614 user->list.next = NULL;
1615 user->list.prev = NULL;
1616 user->remove(conn, user);
1617
1618out_unlock:
1619 hci_dev_unlock(hdev);
1620}
1621EXPORT_SYMBOL(l2cap_unregister_user);
1622
1623static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1624{
1625 struct l2cap_user *user;
1626
1627 while (!list_empty(&conn->users)) {
1628 user = list_first_entry(&conn->users, struct l2cap_user, list);
1629 list_del(&user->list);
1630 user->list.next = NULL;
1631 user->list.prev = NULL;
1632 user->remove(conn, user);
1633 }
1634}
1635
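/* Tear down an L2CAP connection: flush pending RX work, unregister all
 * users, close and release every channel, and finally drop the HCI
 * channel and the connection reference.
 */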
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001636static void l2cap_conn_del(struct hci_conn *hcon, int err)
1637{
1638 struct l2cap_conn *conn = hcon->l2cap_data;
1639 struct l2cap_chan *chan, *l;
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001640
1641 if (!conn)
1642 return;
1643
1644 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1645
1646 kfree_skb(conn->rx_skb);
1647
Johan Hedberg61a939c2014-01-17 20:45:11 +02001648 skb_queue_purge(&conn->pending_rx);
Jukka Taimisto7ab56c32014-06-12 10:15:13 +00001649
 1650 /* We cannot call flush_work(&conn->pending_rx_work) here since we
1651 * might block if we are running on a worker from the same workqueue
1652 * pending_rx_work is waiting on.
1653 */
1654 if (work_pending(&conn->pending_rx_work))
1655 cancel_work_sync(&conn->pending_rx_work);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001656
Johan Hedbergf3d82d02014-09-05 22:19:50 +03001657 if (work_pending(&conn->id_addr_update_work))
1658 cancel_work_sync(&conn->id_addr_update_work);
1659
David Herrmann2c8e1412013-04-06 20:28:45 +02001660 l2cap_unregister_all_users(conn);
1661
Johan Hedberge31fb862014-08-18 20:33:28 +03001662 /* Force the connection to be immediately dropped */
1663 hcon->disc_timeout = 0;
1664
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001665 mutex_lock(&conn->chan_lock);
1666
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001667 /* Kill channels */
1668 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
Mat Martineau61d6ef32012-04-27 16:50:50 -07001669 l2cap_chan_hold(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001670 l2cap_chan_lock(chan);
1671
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001672 l2cap_chan_del(chan, err);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001673
1674 l2cap_chan_unlock(chan);
1675
Gustavo Padovan80b98022012-05-27 22:27:51 -03001676 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07001677 l2cap_chan_put(chan);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001678 }
1679
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001680 mutex_unlock(&conn->chan_lock);
1681
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001682 hci_chan_del(conn->hchan);
1683
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001684 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
Ulisses Furquim127074b2012-01-30 18:26:29 -02001685 cancel_delayed_work_sync(&conn->info_timer);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001686
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001687 hcon->l2cap_data = NULL;
David Herrmann9c903e32013-04-06 20:28:44 +02001688 conn->hchan = NULL;
1689 l2cap_conn_put(conn);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001690}
1691
David Herrmann9c903e32013-04-06 20:28:44 +02001692static void l2cap_conn_free(struct kref *ref)
1693{
1694 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1695
1696 hci_conn_put(conn->hcon);
1697 kfree(conn);
1698}
1699
Johan Hedberg51bb84572014-08-15 21:06:57 +03001700struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
David Herrmann9c903e32013-04-06 20:28:44 +02001701{
1702 kref_get(&conn->ref);
Johan Hedberg51bb84572014-08-15 21:06:57 +03001703 return conn;
David Herrmann9c903e32013-04-06 20:28:44 +02001704}
1705EXPORT_SYMBOL(l2cap_conn_get);
1706
1707void l2cap_conn_put(struct l2cap_conn *conn)
1708{
1709 kref_put(&conn->ref, l2cap_conn_free);
1710}
1711EXPORT_SYMBOL(l2cap_conn_put);
1712
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714
Ido Yarivc2287682012-04-20 15:46:07 -03001715/* Find channel with psm and source / destination bdaddr.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001716 * Returns closest match.
1717 */
Ido Yarivc2287682012-04-20 15:46:07 -03001718static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1719 bdaddr_t *src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001720 bdaddr_t *dst,
1721 u8 link_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001722{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001723 struct l2cap_chan *c, *c1 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001724
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001725 read_lock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001726
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001727 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001728 if (state && c->state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001729 continue;
1730
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001731 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1732 continue;
1733
1734 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1735 continue;
1736
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001737 if (c->psm == psm) {
Ido Yarivc2287682012-04-20 15:46:07 -03001738 int src_match, dst_match;
1739 int src_any, dst_any;
1740
Linus Torvalds1da177e2005-04-16 15:20:36 -07001741 /* Exact match. */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001742 src_match = !bacmp(&c->src, src);
1743 dst_match = !bacmp(&c->dst, dst);
Ido Yarivc2287682012-04-20 15:46:07 -03001744 if (src_match && dst_match) {
Johan Hedberga24cce12014-08-07 22:56:42 +03001745 l2cap_chan_hold(c);
Johannes Berga7567b22011-06-01 08:29:54 +02001746 read_unlock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001747 return c;
1748 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749
1750 /* Closest match */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001751 src_any = !bacmp(&c->src, BDADDR_ANY);
1752 dst_any = !bacmp(&c->dst, BDADDR_ANY);
Ido Yarivc2287682012-04-20 15:46:07 -03001753 if ((src_match && dst_any) || (src_any && dst_match) ||
1754 (src_any && dst_any))
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001755 c1 = c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001756 }
1757 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758
Johan Hedberga24cce12014-08-07 22:56:42 +03001759 if (c1)
1760 l2cap_chan_hold(c1);
1761
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001762 read_unlock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001763
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001764 return c1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001765}
1766
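/* Delayed-work handlers for the ERTM monitor and retransmission timers.
 * Each feeds its event into the TX state machine, unless the channel
 * has already lost its connection, and then drops its channel
 * reference.
 */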
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001767static void l2cap_monitor_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001768{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001769 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001770 monitor_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001771
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03001772 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001773
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001774 l2cap_chan_lock(chan);
1775
Mat Martineau80909e02012-05-17 20:53:50 -07001776 if (!chan->conn) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001777 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001778 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001779 return;
1780 }
1781
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001782 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001783
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001784 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001785 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001786}
1787
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001788static void l2cap_retrans_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001789{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001790 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001791 retrans_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001792
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03001793 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001794
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001795 l2cap_chan_lock(chan);
1796
Mat Martineau80909e02012-05-17 20:53:50 -07001797 if (!chan->conn) {
1798 l2cap_chan_unlock(chan);
1799 l2cap_chan_put(chan);
1800 return;
1801 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001802
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001803 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001804 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001805 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001806}
1807
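/* Transmit queued PDUs in Streaming mode: each frame gets the next TX
 * sequence number and an optional FCS and is sent immediately, since
 * Streaming mode keeps no copies for retransmission.
 */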
Gustavo Padovand6603662012-05-21 13:58:22 -03001808static void l2cap_streaming_send(struct l2cap_chan *chan,
1809 struct sk_buff_head *skbs)
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001810{
Gustavo F. Padovanccbb84a2010-08-30 18:44:44 -03001811 struct sk_buff *skb;
Mat Martineau37339372012-05-17 20:53:33 -07001812 struct l2cap_ctrl *control;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001813
Mat Martineau37339372012-05-17 20:53:33 -07001814 BT_DBG("chan %p, skbs %p", chan, skbs);
1815
Mat Martineaub99e13a2012-10-23 15:24:19 -07001816 if (__chan_is_moving(chan))
1817 return;
1818
Mat Martineau37339372012-05-17 20:53:33 -07001819 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1820
1821 while (!skb_queue_empty(&chan->tx_q)) {
1822
1823 skb = skb_dequeue(&chan->tx_q);
1824
1825 bt_cb(skb)->control.retries = 1;
1826 control = &bt_cb(skb)->control;
1827
1828 control->reqseq = 0;
1829 control->txseq = chan->next_tx_seq;
1830
1831 __pack_control(chan, control, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001832
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001833 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau37339372012-05-17 20:53:33 -07001834 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1835 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001836 }
1837
Gustavo F. Padovan43434782011-04-12 18:31:57 -03001838 l2cap_do_send(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001839
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001840 BT_DBG("Sent txseq %u", control->txseq);
Mat Martineau37339372012-05-17 20:53:33 -07001841
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001842 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau37339372012-05-17 20:53:33 -07001843 chan->frames_sent++;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001844 }
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001845}
1846
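/* Transmit new I-frames in ERTM mode, up to the remote TX window. Each
 * frame is cloned before sending so the original stays on the TX queue
 * for possible retransmission, and the retransmission timer is armed.
 */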
Szymon Janc67c9e842011-07-28 16:24:33 +02001847static int l2cap_ertm_send(struct l2cap_chan *chan)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001848{
1849 struct sk_buff *skb, *tx_skb;
Mat Martineau18a48e72012-05-17 20:53:34 -07001850 struct l2cap_ctrl *control;
1851 int sent = 0;
1852
1853 BT_DBG("chan %p", chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001854
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001855 if (chan->state != BT_CONNECTED)
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001856 return -ENOTCONN;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001857
Mat Martineau94122bb2012-05-02 09:42:02 -07001858 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1859 return 0;
1860
Mat Martineaub99e13a2012-10-23 15:24:19 -07001861 if (__chan_is_moving(chan))
1862 return 0;
1863
Mat Martineau18a48e72012-05-17 20:53:34 -07001864 while (chan->tx_send_head &&
1865 chan->unacked_frames < chan->remote_tx_win &&
1866 chan->tx_state == L2CAP_TX_STATE_XMIT) {
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001867
Mat Martineau18a48e72012-05-17 20:53:34 -07001868 skb = chan->tx_send_head;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001869
Mat Martineau18a48e72012-05-17 20:53:34 -07001870 bt_cb(skb)->control.retries = 1;
1871 control = &bt_cb(skb)->control;
Gustavo F. Padovan95ffa972010-06-18 20:37:33 -03001872
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001873 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
Mat Martineau18a48e72012-05-17 20:53:34 -07001874 control->final = 1;
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001875
Mat Martineau18a48e72012-05-17 20:53:34 -07001876 control->reqseq = chan->buffer_seq;
1877 chan->last_acked_seq = chan->buffer_seq;
1878 control->txseq = chan->next_tx_seq;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001879
Mat Martineau18a48e72012-05-17 20:53:34 -07001880 __pack_control(chan, control, skb);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001881
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001882 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau18a48e72012-05-17 20:53:34 -07001883 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1884 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001885 }
1886
Mat Martineau18a48e72012-05-17 20:53:34 -07001887 /* Clone after data has been modified. Data is assumed to be
1888 read-only (for locking purposes) on cloned sk_buffs.
1889 */
1890 tx_skb = skb_clone(skb, GFP_KERNEL);
1891
1892 if (!tx_skb)
1893 break;
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -03001894
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001895 __set_retrans_timer(chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001896
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001897 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau18a48e72012-05-17 20:53:34 -07001898 chan->unacked_frames++;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03001899 chan->frames_sent++;
Mat Martineau18a48e72012-05-17 20:53:34 -07001900 sent++;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001901
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001902 if (skb_queue_is_last(&chan->tx_q, skb))
1903 chan->tx_send_head = NULL;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001904 else
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001905 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
Mat Martineau18a48e72012-05-17 20:53:34 -07001906
1907 l2cap_do_send(chan, tx_skb);
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001908 BT_DBG("Sent txseq %u", control->txseq);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001909 }
1910
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001911 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1912 chan->unacked_frames, skb_queue_len(&chan->tx_q));
Mat Martineau18a48e72012-05-17 20:53:34 -07001913
1914 return sent;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001915}
1916
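/* Retransmit the frames whose sequence numbers are on the retransmit
 * list, enforcing chan->max_tx and rewriting the control field and FCS
 * on a private copy of each skb.
 */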
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001917static void l2cap_ertm_resend(struct l2cap_chan *chan)
1918{
1919 struct l2cap_ctrl control;
1920 struct sk_buff *skb;
1921 struct sk_buff *tx_skb;
1922 u16 seq;
1923
1924 BT_DBG("chan %p", chan);
1925
1926 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1927 return;
1928
Mat Martineaub99e13a2012-10-23 15:24:19 -07001929 if (__chan_is_moving(chan))
1930 return;
1931
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001932 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1933 seq = l2cap_seq_list_pop(&chan->retrans_list);
1934
1935 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1936 if (!skb) {
1937 BT_DBG("Error: Can't retransmit seq %d, frame missing",
Gustavo Padovan2d792812012-10-06 10:07:01 +01001938 seq);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001939 continue;
1940 }
1941
1942 bt_cb(skb)->control.retries++;
1943 control = bt_cb(skb)->control;
1944
1945 if (chan->max_tx != 0 &&
1946 bt_cb(skb)->control.retries > chan->max_tx) {
1947 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001948 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001949 l2cap_seq_list_clear(&chan->retrans_list);
1950 break;
1951 }
1952
1953 control.reqseq = chan->buffer_seq;
1954 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1955 control.final = 1;
1956 else
1957 control.final = 0;
1958
1959 if (skb_cloned(skb)) {
1960 /* Cloned sk_buffs are read-only, so we need a
1961 * writeable copy
1962 */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03001963 tx_skb = skb_copy(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001964 } else {
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03001965 tx_skb = skb_clone(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001966 }
1967
1968 if (!tx_skb) {
1969 l2cap_seq_list_clear(&chan->retrans_list);
1970 break;
1971 }
1972
1973 /* Update skb contents */
1974 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1975 put_unaligned_le32(__pack_extended_control(&control),
1976 tx_skb->data + L2CAP_HDR_SIZE);
1977 } else {
1978 put_unaligned_le16(__pack_enhanced_control(&control),
1979 tx_skb->data + L2CAP_HDR_SIZE);
1980 }
1981
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02001982 /* Update FCS */
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001983 if (chan->fcs == L2CAP_FCS_CRC16) {
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02001984 u16 fcs = crc16(0, (u8 *) tx_skb->data,
1985 tx_skb->len - L2CAP_FCS_SIZE);
1986 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
1987 L2CAP_FCS_SIZE);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001988 }
1989
1990 l2cap_do_send(chan, tx_skb);
1991
1992 BT_DBG("Resent txseq %d", control.txseq);
1993
1994 chan->last_acked_seq = chan->buffer_seq;
1995 }
1996}
1997
Mat Martineauf80842a2012-05-17 20:53:46 -07001998static void l2cap_retransmit(struct l2cap_chan *chan,
1999 struct l2cap_ctrl *control)
2000{
2001 BT_DBG("chan %p, control %p", chan, control);
2002
2003 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2004 l2cap_ertm_resend(chan);
2005}
2006
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002007static void l2cap_retransmit_all(struct l2cap_chan *chan,
2008 struct l2cap_ctrl *control)
2009{
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002010 struct sk_buff *skb;
2011
2012 BT_DBG("chan %p, control %p", chan, control);
2013
2014 if (control->poll)
2015 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2016
2017 l2cap_seq_list_clear(&chan->retrans_list);
2018
2019 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2020 return;
2021
2022 if (chan->unacked_frames) {
2023 skb_queue_walk(&chan->tx_q, skb) {
2024 if (bt_cb(skb)->control.txseq == control->reqseq ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01002025 skb == chan->tx_send_head)
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002026 break;
2027 }
2028
2029 skb_queue_walk_from(&chan->tx_q, skb) {
2030 if (skb == chan->tx_send_head)
2031 break;
2032
2033 l2cap_seq_list_append(&chan->retrans_list,
2034 bt_cb(skb)->control.txseq);
2035 }
2036
2037 l2cap_ertm_resend(chan);
2038 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002039}
2040
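/* Acknowledge received I-frames: send RNR when locally busy, piggyback
 * the ack on outgoing I-frames when possible, and otherwise send RR
 * once roughly three quarters of the ack window is outstanding.
 */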
Szymon Jancb17e73b2012-01-11 10:59:47 +01002041static void l2cap_send_ack(struct l2cap_chan *chan)
2042{
Mat Martineau0a0aba42012-05-17 20:53:39 -07002043 struct l2cap_ctrl control;
2044 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2045 chan->last_acked_seq);
2046 int threshold;
2047
2048 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2049 chan, chan->last_acked_seq, chan->buffer_seq);
2050
2051 memset(&control, 0, sizeof(control));
2052 control.sframe = 1;
2053
2054 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2055 chan->rx_state == L2CAP_RX_STATE_RECV) {
2056 __clear_ack_timer(chan);
2057 control.super = L2CAP_SUPER_RNR;
2058 control.reqseq = chan->buffer_seq;
2059 l2cap_send_sframe(chan, &control);
2060 } else {
2061 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2062 l2cap_ertm_send(chan);
2063 /* If any i-frames were sent, they included an ack */
2064 if (chan->buffer_seq == chan->last_acked_seq)
2065 frames_to_ack = 0;
2066 }
2067
Mat Martineauc20f8e32012-07-10 05:47:07 -07002068 /* Ack now if the window is 3/4ths full.
Mat Martineau0a0aba42012-05-17 20:53:39 -07002069 * Calculate without mul or div
2070 */
Mat Martineauc20f8e32012-07-10 05:47:07 -07002071 threshold = chan->ack_win;
Mat Martineau0a0aba42012-05-17 20:53:39 -07002072 threshold += threshold << 1;
2073 threshold >>= 2;
2074
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002075 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
Mat Martineau0a0aba42012-05-17 20:53:39 -07002076 threshold);
2077
2078 if (frames_to_ack >= threshold) {
2079 __clear_ack_timer(chan);
2080 control.super = L2CAP_SUPER_RR;
2081 control.reqseq = chan->buffer_seq;
2082 l2cap_send_sframe(chan, &control);
2083 frames_to_ack = 0;
2084 }
2085
2086 if (frames_to_ack)
2087 __set_ack_timer(chan);
2088 }
Szymon Jancb17e73b2012-01-11 10:59:47 +01002089}
2090
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002091static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2092 struct msghdr *msg, int len,
2093 int count, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094{
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002095 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002096 struct sk_buff **frag;
Gustavo Padovan90338942012-04-06 20:15:47 -03002097 int sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002098
Jukka Rissanen04988782014-06-18 16:37:07 +03002099 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
Al Viro56c39fb2014-11-24 16:44:09 -05002100 msg, count))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002101 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002102
2103 sent += count;
2104 len -= count;
2105
2106 /* Continuation fragments (no L2CAP header) */
2107 frag = &skb_shinfo(skb)->frag_list;
2108 while (len) {
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002109 struct sk_buff *tmp;
2110
Linus Torvalds1da177e2005-04-16 15:20:36 -07002111 count = min_t(unsigned int, conn->mtu, len);
2112
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002113 tmp = chan->ops->alloc_skb(chan, 0, count,
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002114 msg->msg_flags & MSG_DONTWAIT);
2115 if (IS_ERR(tmp))
2116 return PTR_ERR(tmp);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002117
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002118 *frag = tmp;
2119
Jukka Rissanen04988782014-06-18 16:37:07 +03002120 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
Al Viro56c39fb2014-11-24 16:44:09 -05002121 msg, count))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002122 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002123
2124 sent += count;
2125 len -= count;
2126
Gustavo Padovan2d0ed3d2012-05-11 13:16:12 -03002127 skb->len += (*frag)->len;
2128 skb->data_len += (*frag)->len;
2129
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130 frag = &(*frag)->next;
2131 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002132
2133 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002134}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002136static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002137 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002138{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002139 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002140 struct sk_buff *skb;
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002141 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002142 struct l2cap_hdr *lh;
2143
Marcel Holtmann8d463212014-06-05 15:22:51 +02002144 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2145 __le16_to_cpu(chan->psm), len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002146
2147 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002148
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002149 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002150 msg->msg_flags & MSG_DONTWAIT);
2151 if (IS_ERR(skb))
2152 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002153
2154 /* Create L2CAP header */
2155 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002156 lh->cid = cpu_to_le16(chan->dcid);
Andrei Emeltchenkodaf6a78c2012-05-03 10:55:52 +03002157 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
Marcel Holtmann43b1b8d2013-10-12 06:01:26 -07002158 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002159
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002160 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002161 if (unlikely(err < 0)) {
2162 kfree_skb(skb);
2163 return ERR_PTR(err);
2164 }
2165 return skb;
2166}
2167
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002168static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002169 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002170{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002171 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002172 struct sk_buff *skb;
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002173 int err, count;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002174 struct l2cap_hdr *lh;
2175
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002176 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002177
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002178 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002179
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002180 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002181 msg->msg_flags & MSG_DONTWAIT);
2182 if (IS_ERR(skb))
2183 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002184
2185 /* Create L2CAP header */
2186 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002187 lh->cid = cpu_to_le16(chan->dcid);
Gustavo Padovan6ff9b5e2012-05-02 11:56:17 -03002188 lh->len = cpu_to_le16(len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002189
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002190 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002191 if (unlikely(err < 0)) {
2192 kfree_skb(skb);
2193 return ERR_PTR(err);
2194 }
2195 return skb;
2196}
2197
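/* Build a single ERTM/Streaming I-frame: L2CAP header, space for the
 * enhanced or extended control field (filled in later), an optional
 * SDU length for the first fragment of a segmented SDU, the payload
 * and room for the FCS.
 */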
Luiz Augusto von Dentzab0ff762011-09-12 20:00:50 +03002198static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002199 struct msghdr *msg, size_t len,
2200 u16 sdulen)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002201{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002202 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002203 struct sk_buff *skb;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002204 int err, count, hlen;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002205 struct l2cap_hdr *lh;
2206
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002207 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002208
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03002209 if (!conn)
2210 return ERR_PTR(-ENOTCONN);
2211
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002212 hlen = __ertm_hdr_size(chan);
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002213
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002214 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002215 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002216
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03002217 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002218 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03002219
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002220 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002221
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002222 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002223 msg->msg_flags & MSG_DONTWAIT);
2224 if (IS_ERR(skb))
2225 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002226
2227 /* Create L2CAP header */
2228 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002229 lh->cid = cpu_to_le16(chan->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002230 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002231
Mat Martineau18a48e72012-05-17 20:53:34 -07002232 /* Control header is populated later */
2233 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2234 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2235 else
2236 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002237
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002238 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002239 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002240
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002241 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002242 if (unlikely(err < 0)) {
2243 kfree_skb(skb);
2244 return ERR_PTR(err);
2245 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03002246
Mat Martineau18a48e72012-05-17 20:53:34 -07002247 bt_cb(skb)->control.fcs = chan->fcs;
Mat Martineau3ce35142012-04-25 16:36:14 -07002248 bt_cb(skb)->control.retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002249 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002250}
2251
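/* Segment an outgoing SDU into ERTM/Streaming PDUs sized to fit a
 * single HCI fragment and the remote MPS; only the first PDU of a
 * segmented SDU carries the SDU length field.
 */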
Mat Martineau94122bb2012-05-02 09:42:02 -07002252static int l2cap_segment_sdu(struct l2cap_chan *chan,
2253 struct sk_buff_head *seg_queue,
2254 struct msghdr *msg, size_t len)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002255{
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002256 struct sk_buff *skb;
Mat Martineau94122bb2012-05-02 09:42:02 -07002257 u16 sdu_len;
2258 size_t pdu_len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002259 u8 sar;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002260
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002261 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002262
Mat Martineau94122bb2012-05-02 09:42:02 -07002263 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2264 * so fragmented skbs are not used. The HCI layer's handling
2265 * of fragmented skbs is not compatible with ERTM's queueing.
2266 */
2267
2268 /* PDU size is derived from the HCI MTU */
2269 pdu_len = chan->conn->mtu;
2270
Mat Martineaua5495742012-10-23 15:24:21 -07002271 /* Constrain PDU size for BR/EDR connections */
2272 if (!chan->hs_hcon)
2273 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
Mat Martineau94122bb2012-05-02 09:42:02 -07002274
2275 /* Adjust for largest possible L2CAP overhead. */
Gustavo Padovan35d401d2012-05-25 18:57:05 -03002276 if (chan->fcs)
2277 pdu_len -= L2CAP_FCS_SIZE;
2278
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002279 pdu_len -= __ertm_hdr_size(chan);
Mat Martineau94122bb2012-05-02 09:42:02 -07002280
2281 /* Remote device may have requested smaller PDUs */
2282 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2283
2284 if (len <= pdu_len) {
2285 sar = L2CAP_SAR_UNSEGMENTED;
2286 sdu_len = 0;
2287 pdu_len = len;
2288 } else {
2289 sar = L2CAP_SAR_START;
2290 sdu_len = len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002291 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002292
2293 while (len > 0) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002294 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002295
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002296 if (IS_ERR(skb)) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002297 __skb_queue_purge(seg_queue);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002298 return PTR_ERR(skb);
2299 }
2300
Mat Martineau94122bb2012-05-02 09:42:02 -07002301 bt_cb(skb)->control.sar = sar;
2302 __skb_queue_tail(seg_queue, skb);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002303
Mat Martineau94122bb2012-05-02 09:42:02 -07002304 len -= pdu_len;
Lukasz Rymanowski069cb272014-08-13 16:01:41 +02002305 if (sdu_len)
Mat Martineau94122bb2012-05-02 09:42:02 -07002306 sdu_len = 0;
Mat Martineau94122bb2012-05-02 09:42:02 -07002307
2308 if (len <= pdu_len) {
2309 sar = L2CAP_SAR_END;
2310 pdu_len = len;
2311 } else {
2312 sar = L2CAP_SAR_CONTINUE;
2313 }
2314 }
2315
Gustavo Padovanf0f62792012-05-29 13:29:17 -03002316 return 0;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002317}
2318
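/* Build a single LE flow-control PDU: L2CAP header, optional SDU length
 * (first PDU of an SDU only) and payload copied from the caller's
 * message.
 */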
Johan Hedberg177f8f22013-05-31 17:54:51 +03002319static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2320 struct msghdr *msg,
2321 size_t len, u16 sdulen)
2322{
2323 struct l2cap_conn *conn = chan->conn;
2324 struct sk_buff *skb;
2325 int err, count, hlen;
2326 struct l2cap_hdr *lh;
2327
2328 BT_DBG("chan %p len %zu", chan, len);
2329
2330 if (!conn)
2331 return ERR_PTR(-ENOTCONN);
2332
2333 hlen = L2CAP_HDR_SIZE;
2334
2335 if (sdulen)
2336 hlen += L2CAP_SDULEN_SIZE;
2337
2338 count = min_t(unsigned int, (conn->mtu - hlen), len);
2339
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002340 skb = chan->ops->alloc_skb(chan, hlen, count,
Johan Hedberg177f8f22013-05-31 17:54:51 +03002341 msg->msg_flags & MSG_DONTWAIT);
2342 if (IS_ERR(skb))
2343 return skb;
2344
2345 /* Create L2CAP header */
2346 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2347 lh->cid = cpu_to_le16(chan->dcid);
2348 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2349
2350 if (sdulen)
2351 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2352
2353 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2354 if (unlikely(err < 0)) {
2355 kfree_skb(skb);
2356 return ERR_PTR(err);
2357 }
2358
2359 return skb;
2360}
2361
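/* Segment an outgoing SDU into LE flow-control PDUs. Only the first PDU
 * carries the SDU length field, so the following PDUs can carry
 * L2CAP_SDULEN_SIZE additional payload bytes.
 */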
2362static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2363 struct sk_buff_head *seg_queue,
2364 struct msghdr *msg, size_t len)
2365{
2366 struct sk_buff *skb;
2367 size_t pdu_len;
2368 u16 sdu_len;
2369
2370 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2371
Johan Hedberg177f8f22013-05-31 17:54:51 +03002372 sdu_len = len;
Johan Hedberg72c6fb92014-08-15 21:06:51 +03002373 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
Johan Hedberg177f8f22013-05-31 17:54:51 +03002374
2375 while (len > 0) {
2376 if (len <= pdu_len)
2377 pdu_len = len;
2378
2379 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2380 if (IS_ERR(skb)) {
2381 __skb_queue_purge(seg_queue);
2382 return PTR_ERR(skb);
2383 }
2384
2385 __skb_queue_tail(seg_queue, skb);
2386
2387 len -= pdu_len;
2388
2389 if (sdu_len) {
2390 sdu_len = 0;
2391 pdu_len += L2CAP_SDULEN_SIZE;
2392 }
2393 }
2394
2395 return 0;
2396}
2397
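/* Main transmit entry point. Connectionless channels send a single PDU;
 * LE flow-control channels segment the SDU and transmit while credits
 * remain; Basic mode sends one PDU; ERTM and Streaming segment first
 * and then hand the queue to the respective TX path.
 */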
Marcel Holtmann8d463212014-06-05 15:22:51 +02002398int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002399{
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002400 struct sk_buff *skb;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002401 int err;
Mat Martineau94122bb2012-05-02 09:42:02 -07002402 struct sk_buff_head seg_queue;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002403
Seung-Woo Kim31e8ce82013-11-05 18:46:33 +09002404 if (!chan->conn)
2405 return -ENOTCONN;
2406
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002407 /* Connectionless channel */
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002408 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
Marcel Holtmann8d463212014-06-05 15:22:51 +02002409 skb = l2cap_create_connless_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002410 if (IS_ERR(skb))
2411 return PTR_ERR(skb);
2412
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002413 /* Channel lock is released before requesting new skb and then
2414 * reacquired thus we need to recheck channel state.
2415 */
2416 if (chan->state != BT_CONNECTED) {
2417 kfree_skb(skb);
2418 return -ENOTCONN;
2419 }
2420
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002421 l2cap_do_send(chan, skb);
2422 return len;
2423 }
2424
2425 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03002426 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedberg177f8f22013-05-31 17:54:51 +03002427 /* Check outgoing MTU */
2428 if (len > chan->omtu)
2429 return -EMSGSIZE;
2430
Johan Hedbergfad5fc82013-12-05 09:45:01 +02002431 if (!chan->tx_credits)
2432 return -EAGAIN;
2433
Johan Hedberg177f8f22013-05-31 17:54:51 +03002434 __skb_queue_head_init(&seg_queue);
2435
2436 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2437
2438 if (chan->state != BT_CONNECTED) {
2439 __skb_queue_purge(&seg_queue);
2440 err = -ENOTCONN;
2441 }
2442
2443 if (err)
2444 return err;
2445
2446 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2447
2448 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2449 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2450 chan->tx_credits--;
2451 }
2452
2453 if (!chan->tx_credits)
2454 chan->ops->suspend(chan);
2455
2456 err = len;
2457
2458 break;
2459
Johan Hedbergfad5fc82013-12-05 09:45:01 +02002460 case L2CAP_MODE_BASIC:
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002461 /* Check outgoing MTU */
2462 if (len > chan->omtu)
2463 return -EMSGSIZE;
2464
2465 /* Create a basic PDU */
Marcel Holtmann8d463212014-06-05 15:22:51 +02002466 skb = l2cap_create_basic_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002467 if (IS_ERR(skb))
2468 return PTR_ERR(skb);
2469
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002470 /* Channel lock is released before requesting new skb and then
2471 * reacquired thus we need to recheck channel state.
2472 */
2473 if (chan->state != BT_CONNECTED) {
2474 kfree_skb(skb);
2475 return -ENOTCONN;
2476 }
2477
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002478 l2cap_do_send(chan, skb);
2479 err = len;
2480 break;
2481
2482 case L2CAP_MODE_ERTM:
2483 case L2CAP_MODE_STREAMING:
Mat Martineau94122bb2012-05-02 09:42:02 -07002484 /* Check outgoing MTU */
2485 if (len > chan->omtu) {
2486 err = -EMSGSIZE;
2487 break;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002488 }
2489
Mat Martineau94122bb2012-05-02 09:42:02 -07002490 __skb_queue_head_init(&seg_queue);
2491
2492 /* Do segmentation before calling in to the state machine,
2493 * since it's possible to block while waiting for memory
2494 * allocation.
2495 */
2496 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2497
2498 /* The channel could have been closed while segmenting,
2499 * check that it is still connected.
2500 */
2501 if (chan->state != BT_CONNECTED) {
2502 __skb_queue_purge(&seg_queue);
2503 err = -ENOTCONN;
2504 }
2505
2506 if (err)
2507 break;
2508
Mat Martineau37339372012-05-17 20:53:33 -07002509 if (chan->mode == L2CAP_MODE_ERTM)
Gustavo Padovand6603662012-05-21 13:58:22 -03002510 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
Mat Martineau37339372012-05-17 20:53:33 -07002511 else
Gustavo Padovand6603662012-05-21 13:58:22 -03002512 l2cap_streaming_send(chan, &seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002513
Gustavo Padovand6603662012-05-21 13:58:22 -03002514 err = len;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002515
Mat Martineau94122bb2012-05-02 09:42:02 -07002516 /* If the skbs were not queued for sending, they'll still be in
2517 * seg_queue and need to be purged.
2518 */
2519 __skb_queue_purge(&seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002520 break;
2521
2522 default:
2523		BT_DBG("bad mode 0x%2.2x", chan->mode);
2524 err = -EBADFD;
2525 }
2526
2527 return err;
2528}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03002529EXPORT_SYMBOL_GPL(l2cap_chan_send);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002530
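/* Send an SREJ S-frame for every frame between expected_tx_seq and txseq
 * that has not already been received out of order (i.e. is not buffered in
 * srej_q), record each request in srej_list, and advance expected_tx_seq
 * past txseq.
 */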
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002531static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2532{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002533 struct l2cap_ctrl control;
2534 u16 seq;
2535
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002536 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002537
2538 memset(&control, 0, sizeof(control));
2539 control.sframe = 1;
2540 control.super = L2CAP_SUPER_SREJ;
2541
2542 for (seq = chan->expected_tx_seq; seq != txseq;
2543 seq = __next_seq(chan, seq)) {
2544 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2545 control.reqseq = seq;
2546 l2cap_send_sframe(chan, &control);
2547 l2cap_seq_list_append(&chan->srej_list, seq);
2548 }
2549 }
2550
2551 chan->expected_tx_seq = __next_seq(chan, txseq);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002552}
2553
2554static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2555{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002556 struct l2cap_ctrl control;
2557
2558 BT_DBG("chan %p", chan);
2559
2560 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2561 return;
2562
2563 memset(&control, 0, sizeof(control));
2564 control.sframe = 1;
2565 control.super = L2CAP_SUPER_SREJ;
2566 control.reqseq = chan->srej_list.tail;
2567 l2cap_send_sframe(chan, &control);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002568}
2569
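/* Resend an SREJ for each sequence number still outstanding on srej_list,
 * making at most one pass through the list and stopping early if txseq is
 * reached.
 */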
2570static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2571{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002572 struct l2cap_ctrl control;
2573 u16 initial_head;
2574 u16 seq;
2575
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002576 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002577
2578 memset(&control, 0, sizeof(control));
2579 control.sframe = 1;
2580 control.super = L2CAP_SUPER_SREJ;
2581
2582 /* Capture initial list head to allow only one pass through the list. */
2583 initial_head = chan->srej_list.head;
2584
2585 do {
2586 seq = l2cap_seq_list_pop(&chan->srej_list);
2587 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2588 break;
2589
2590 control.reqseq = seq;
2591 l2cap_send_sframe(chan, &control);
2592 l2cap_seq_list_append(&chan->srej_list, seq);
2593 } while (chan->srej_list.head != initial_head);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002594}
2595
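/* The peer has acknowledged every frame up to (but not including) reqseq:
 * free those frames from the tx queue, update expected_ack_seq, and clear
 * the retransmission timer once nothing is left unacked.
 */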
Mat Martineau608bcc62012-05-17 20:53:32 -07002596static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2597{
2598 struct sk_buff *acked_skb;
2599 u16 ackseq;
2600
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002601 BT_DBG("chan %p, reqseq %u", chan, reqseq);
Mat Martineau608bcc62012-05-17 20:53:32 -07002602
2603 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2604 return;
2605
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002606 BT_DBG("expected_ack_seq %u, unacked_frames %u",
Mat Martineau608bcc62012-05-17 20:53:32 -07002607 chan->expected_ack_seq, chan->unacked_frames);
2608
2609 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2610 ackseq = __next_seq(chan, ackseq)) {
2611
2612 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2613 if (acked_skb) {
2614 skb_unlink(acked_skb, &chan->tx_q);
2615 kfree_skb(acked_skb);
2616 chan->unacked_frames--;
2617 }
2618 }
2619
2620 chan->expected_ack_seq = reqseq;
2621
2622 if (chan->unacked_frames == 0)
2623 __clear_retrans_timer(chan);
2624
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002625 BT_DBG("unacked_frames %u", chan->unacked_frames);
Mat Martineau608bcc62012-05-17 20:53:32 -07002626}
2627
2628static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2629{
2630 BT_DBG("chan %p", chan);
2631
2632 chan->expected_tx_seq = chan->buffer_seq;
2633 l2cap_seq_list_clear(&chan->srej_list);
2634 skb_queue_purge(&chan->srej_q);
2635 chan->rx_state = L2CAP_RX_STATE_RECV;
2636}
2637
Gustavo Padovand6603662012-05-21 13:58:22 -03002638static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2639 struct l2cap_ctrl *control,
2640 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002641{
Mat Martineau608bcc62012-05-17 20:53:32 -07002642 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2643 event);
2644
2645 switch (event) {
2646 case L2CAP_EV_DATA_REQUEST:
2647 if (chan->tx_send_head == NULL)
2648 chan->tx_send_head = skb_peek(skbs);
2649
2650 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2651 l2cap_ertm_send(chan);
2652 break;
2653 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2654 BT_DBG("Enter LOCAL_BUSY");
2655 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2656
2657 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2658 /* The SREJ_SENT state must be aborted if we are to
2659 * enter the LOCAL_BUSY state.
2660 */
2661 l2cap_abort_rx_srej_sent(chan);
2662 }
2663
2664 l2cap_send_ack(chan);
2665
2666 break;
2667 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2668 BT_DBG("Exit LOCAL_BUSY");
2669 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2670
2671 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2672 struct l2cap_ctrl local_control;
2673
2674 memset(&local_control, 0, sizeof(local_control));
2675 local_control.sframe = 1;
2676 local_control.super = L2CAP_SUPER_RR;
2677 local_control.poll = 1;
2678 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002679 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002680
2681 chan->retry_count = 1;
2682 __set_monitor_timer(chan);
2683 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2684 }
2685 break;
2686 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2687 l2cap_process_reqseq(chan, control->reqseq);
2688 break;
2689 case L2CAP_EV_EXPLICIT_POLL:
2690 l2cap_send_rr_or_rnr(chan, 1);
2691 chan->retry_count = 1;
2692 __set_monitor_timer(chan);
2693 __clear_ack_timer(chan);
2694 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2695 break;
2696 case L2CAP_EV_RETRANS_TO:
2697 l2cap_send_rr_or_rnr(chan, 1);
2698 chan->retry_count = 1;
2699 __set_monitor_timer(chan);
2700 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2701 break;
2702 case L2CAP_EV_RECV_FBIT:
2703 /* Nothing to process */
2704 break;
2705 default:
2706 break;
2707 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002708}
2709
Gustavo Padovand6603662012-05-21 13:58:22 -03002710static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2711 struct l2cap_ctrl *control,
2712 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002713{
Mat Martineau608bcc62012-05-17 20:53:32 -07002714 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2715 event);
2716
2717 switch (event) {
2718 case L2CAP_EV_DATA_REQUEST:
2719 if (chan->tx_send_head == NULL)
2720 chan->tx_send_head = skb_peek(skbs);
2721 /* Queue data, but don't send. */
2722 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2723 break;
2724 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2725 BT_DBG("Enter LOCAL_BUSY");
2726 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2727
2728 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2729 /* The SREJ_SENT state must be aborted if we are to
2730 * enter the LOCAL_BUSY state.
2731 */
2732 l2cap_abort_rx_srej_sent(chan);
2733 }
2734
2735 l2cap_send_ack(chan);
2736
2737 break;
2738 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2739 BT_DBG("Exit LOCAL_BUSY");
2740 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2741
2742 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2743 struct l2cap_ctrl local_control;
2744 memset(&local_control, 0, sizeof(local_control));
2745 local_control.sframe = 1;
2746 local_control.super = L2CAP_SUPER_RR;
2747 local_control.poll = 1;
2748 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002749 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002750
2751 chan->retry_count = 1;
2752 __set_monitor_timer(chan);
2753 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2754 }
2755 break;
2756 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2757 l2cap_process_reqseq(chan, control->reqseq);
2758
2759 /* Fall through */
2760
2761 case L2CAP_EV_RECV_FBIT:
2762 if (control && control->final) {
2763 __clear_monitor_timer(chan);
2764 if (chan->unacked_frames > 0)
2765 __set_retrans_timer(chan);
2766 chan->retry_count = 0;
2767 chan->tx_state = L2CAP_TX_STATE_XMIT;
2768			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2769 }
2770 break;
2771 case L2CAP_EV_EXPLICIT_POLL:
2772 /* Ignore */
2773 break;
2774 case L2CAP_EV_MONITOR_TO:
2775 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2776 l2cap_send_rr_or_rnr(chan, 1);
2777 __set_monitor_timer(chan);
2778 chan->retry_count++;
2779 } else {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02002780 l2cap_send_disconn_req(chan, ECONNABORTED);
Mat Martineau608bcc62012-05-17 20:53:32 -07002781 }
2782 break;
2783 default:
2784 break;
2785 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002786}
2787
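/* ERTM transmit state machine entry point: hand the event to the handler
 * for the current tx_state (XMIT or WAIT_F); events arriving in any other
 * state are ignored.
 */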
Gustavo Padovand6603662012-05-21 13:58:22 -03002788static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2789 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002790{
Mat Martineau608bcc62012-05-17 20:53:32 -07002791 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2792 chan, control, skbs, event, chan->tx_state);
2793
2794 switch (chan->tx_state) {
2795 case L2CAP_TX_STATE_XMIT:
Gustavo Padovand6603662012-05-21 13:58:22 -03002796 l2cap_tx_state_xmit(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002797 break;
2798 case L2CAP_TX_STATE_WAIT_F:
Gustavo Padovand6603662012-05-21 13:58:22 -03002799 l2cap_tx_state_wait_f(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002800 break;
2801 default:
2802 /* Ignore event */
2803 break;
2804 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002805}
2806
Mat Martineau4b51dae92012-05-17 20:53:37 -07002807static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2808 struct l2cap_ctrl *control)
2809{
2810 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002811 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
Mat Martineau4b51dae92012-05-17 20:53:37 -07002812}
2813
Mat Martineauf80842a2012-05-17 20:53:46 -07002814static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2815 struct l2cap_ctrl *control)
2816{
2817 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002818 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
Mat Martineauf80842a2012-05-17 20:53:46 -07002819}
2820
Linus Torvalds1da177e2005-04-16 15:20:36 -07002821/* Copy frame to all raw sockets on that connection */
2822static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2823{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824 struct sk_buff *nskb;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03002825 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826
2827 BT_DBG("conn %p", conn);
2828
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002829 mutex_lock(&conn->chan_lock);
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002830
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002831 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002832 if (chan->chan_type != L2CAP_CHAN_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 continue;
2834
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002835 /* Don't send frame to the channel it came from */
2836 if (bt_cb(skb)->chan == chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 continue;
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002838
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002839 nskb = skb_clone(skb, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03002840 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841 continue;
Gustavo Padovan80b98022012-05-27 22:27:51 -03002842 if (chan->ops->recv(chan, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002843 kfree_skb(nskb);
2844 }
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002845
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002846 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002847}
2848
2849/* ---- L2CAP signalling commands ---- */
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002850static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2851 u8 ident, u16 dlen, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852{
2853 struct sk_buff *skb, **frag;
2854 struct l2cap_cmd_hdr *cmd;
2855 struct l2cap_hdr *lh;
2856 int len, count;
2857
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002858 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2859 conn, code, ident, dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002860
Anderson Lizardo300b9622013-06-02 16:30:40 -04002861 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2862 return NULL;
2863
Linus Torvalds1da177e2005-04-16 15:20:36 -07002864 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2865 count = min_t(unsigned int, conn->mtu, len);
2866
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002867 skb = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868 if (!skb)
2869 return NULL;
2870
2871 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002872 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002873
2874 if (conn->hcon->type == LE_LINK)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002875 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002876 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002877 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002878
2879 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2880 cmd->code = code;
2881 cmd->ident = ident;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002882 cmd->len = cpu_to_le16(dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002883
2884 if (dlen) {
2885 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2886 memcpy(skb_put(skb, count), data, count);
2887 data += count;
2888 }
2889
2890 len -= skb->len;
2891
2892 /* Continuation fragments (no L2CAP header) */
2893 frag = &skb_shinfo(skb)->frag_list;
2894 while (len) {
2895 count = min_t(unsigned int, conn->mtu, len);
2896
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002897 *frag = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002898 if (!*frag)
2899 goto fail;
2900
2901 memcpy(skb_put(*frag, count), data, count);
2902
2903 len -= count;
2904 data += count;
2905
2906 frag = &(*frag)->next;
2907 }
2908
2909 return skb;
2910
2911fail:
2912 kfree_skb(skb);
2913 return NULL;
2914}
2915
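/* Pull the next type/length/value option out of a configuration request or
 * response. Values of 1, 2 or 4 bytes are returned by value (converted from
 * little-endian); longer options are returned as a pointer to the raw data.
 */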
Gustavo Padovan2d792812012-10-06 10:07:01 +01002916static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2917 unsigned long *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002918{
2919 struct l2cap_conf_opt *opt = *ptr;
2920 int len;
2921
2922 len = L2CAP_CONF_OPT_SIZE + opt->len;
2923 *ptr += len;
2924
2925 *type = opt->type;
2926 *olen = opt->len;
2927
2928 switch (opt->len) {
2929 case 1:
2930 *val = *((u8 *) opt->val);
2931 break;
2932
2933 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002934 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002935 break;
2936
2937 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002938 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002939 break;
2940
2941 default:
2942 *val = (unsigned long) opt->val;
2943 break;
2944 }
2945
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002946 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002947 return len;
2948}
2949
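/* Append one type/length/value option at *ptr. Values of 1, 2 or 4 bytes
 * are stored little-endian, larger values are copied verbatim, and *ptr is
 * advanced past the option.
 */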
Linus Torvalds1da177e2005-04-16 15:20:36 -07002950static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2951{
2952 struct l2cap_conf_opt *opt = *ptr;
2953
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002954 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002955
2956 opt->type = type;
2957 opt->len = len;
2958
2959 switch (len) {
2960 case 1:
2961 *((u8 *) opt->val) = val;
2962 break;
2963
2964 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002965 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002966 break;
2967
2968 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002969 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002970 break;
2971
2972 default:
2973 memcpy(opt->val, (void *) val, len);
2974 break;
2975 }
2976
2977 *ptr += L2CAP_CONF_OPT_SIZE + len;
2978}
2979
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002980static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2981{
2982 struct l2cap_conf_efs efs;
2983
Szymon Janc1ec918c2011-11-16 09:32:21 +01002984 switch (chan->mode) {
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002985 case L2CAP_MODE_ERTM:
2986 efs.id = chan->local_id;
2987 efs.stype = chan->local_stype;
2988 efs.msdu = cpu_to_le16(chan->local_msdu);
2989 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002990 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2991 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002992 break;
2993
2994 case L2CAP_MODE_STREAMING:
2995 efs.id = 1;
2996 efs.stype = L2CAP_SERV_BESTEFFORT;
2997 efs.msdu = cpu_to_le16(chan->local_msdu);
2998 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2999 efs.acc_lat = 0;
3000 efs.flush_to = 0;
3001 break;
3002
3003 default:
3004 return;
3005 }
3006
3007 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
Andrei Emeltchenko8936fa62012-10-08 11:14:41 +03003008 (unsigned long) &efs);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003009}
3010
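/* Deferred acknowledgement: when the ack timer fires with frames received
 * but not yet acked, send an RR (or RNR while locally busy) S-frame to
 * acknowledge them.
 */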
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003011static void l2cap_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003012{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003013 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau03625202012-05-17 20:53:51 -07003014 ack_timer.work);
3015 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003016
Gustavo F. Padovan2fb9b3d2011-12-22 16:56:05 -02003017 BT_DBG("chan %p", chan);
3018
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003019 l2cap_chan_lock(chan);
3020
Mat Martineau03625202012-05-17 20:53:51 -07003021 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3022 chan->last_acked_seq);
3023
3024 if (frames_to_ack)
3025 l2cap_send_rr_or_rnr(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003026
3027 l2cap_chan_unlock(chan);
Szymon Janc09bfb2e2012-01-11 10:59:49 +01003028 l2cap_chan_put(chan);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003029}
3030
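/* Reset sequence numbers, SDU reassembly and AMP move state for a newly
 * configured channel. For ERTM mode this also sets up the retransmission,
 * monitor and ack delayed work items and allocates the SREJ and
 * retransmission sequence lists.
 */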
Andrei Emeltchenko466f8002012-05-29 13:59:01 +03003031int l2cap_ertm_init(struct l2cap_chan *chan)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003032{
Mat Martineau3c588192012-04-11 10:48:42 -07003033 int err;
3034
Mat Martineau105bdf92012-04-27 16:50:48 -07003035 chan->next_tx_seq = 0;
3036 chan->expected_tx_seq = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003037 chan->expected_ack_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003038 chan->unacked_frames = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003039 chan->buffer_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003040 chan->frames_sent = 0;
Mat Martineau105bdf92012-04-27 16:50:48 -07003041 chan->last_acked_seq = 0;
3042 chan->sdu = NULL;
3043 chan->sdu_last_frag = NULL;
3044 chan->sdu_len = 0;
3045
Mat Martineaud34c34f2012-05-14 14:49:27 -07003046 skb_queue_head_init(&chan->tx_q);
3047
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003048 chan->local_amp_id = AMP_ID_BREDR;
3049 chan->move_id = AMP_ID_BREDR;
Mat Martineau08333282012-10-23 15:24:06 -07003050 chan->move_state = L2CAP_MOVE_STABLE;
3051 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3052
Mat Martineau105bdf92012-04-27 16:50:48 -07003053 if (chan->mode != L2CAP_MODE_ERTM)
3054 return 0;
3055
3056 chan->rx_state = L2CAP_RX_STATE_RECV;
3057 chan->tx_state = L2CAP_TX_STATE_XMIT;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003058
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003059 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3060 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3061 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003062
Gustavo F. Padovanf1c67752011-03-25 20:36:10 -03003063 skb_queue_head_init(&chan->srej_q);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03003064
Mat Martineau3c588192012-04-11 10:48:42 -07003065 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3066 if (err < 0)
3067 return err;
3068
Mat Martineau9dc9aff2012-05-17 16:20:14 -07003069 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3070 if (err < 0)
3071 l2cap_seq_list_free(&chan->srej_list);
3072
3073 return err;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003074}
3075
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003076static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3077{
3078 switch (mode) {
3079 case L2CAP_MODE_STREAMING:
3080 case L2CAP_MODE_ERTM:
3081 if (l2cap_mode_supported(mode, remote_feat_mask))
3082 return mode;
3083 /* fall through */
3084 default:
3085 return L2CAP_MODE_BASIC;
3086 }
3087}
3088
Marcel Holtmann848566b2013-10-01 22:59:22 -07003089static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003090{
Marcel Holtmann848566b2013-10-01 22:59:22 -07003091 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003092}
3093
Marcel Holtmann848566b2013-10-01 22:59:22 -07003094static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003095{
Marcel Holtmann848566b2013-10-01 22:59:22 -07003096 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003097}
3098
Mat Martineau36c86c82012-10-23 15:24:20 -07003099static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3100 struct l2cap_conf_rfc *rfc)
3101{
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003102 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
Mat Martineau36c86c82012-10-23 15:24:20 -07003103 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3104
3105 /* Class 1 devices have must have ERTM timeouts
3106 * exceeding the Link Supervision Timeout. The
3107 * default Link Supervision Timeout for AMP
3108 * controllers is 10 seconds.
3109 *
3110 * Class 1 devices use 0xffffffff for their
3111 * best-effort flush timeout, so the clamping logic
3112 * will result in a timeout that meets the above
3113 * requirement. ERTM timeouts are 16-bit values, so
3114 * the maximum timeout is 65.535 seconds.
3115 */
3116
3117 /* Convert timeout to milliseconds and round */
3118 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3119
3120 /* This is the recommended formula for class 2 devices
3121 * that start ERTM timers when packets are sent to the
3122 * controller.
3123 */
3124 ertm_to = 3 * ertm_to + 500;
3125
3126 if (ertm_to > 0xffff)
3127 ertm_to = 0xffff;
3128
3129 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3130 rfc->monitor_timeout = rfc->retrans_timeout;
3131 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003132 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3133 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
Mat Martineau36c86c82012-10-23 15:24:20 -07003134 }
3135}
3136
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003137static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3138{
3139 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
Marcel Holtmann848566b2013-10-01 22:59:22 -07003140 __l2cap_ews_supported(chan->conn)) {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003141 /* use extended control field */
3142 set_bit(FLAG_EXT_CTRL, &chan->flags);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003143 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3144 } else {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003145 chan->tx_win = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003146 L2CAP_DEFAULT_TX_WINDOW);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003147 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3148 }
Mat Martineauc20f8e32012-07-10 05:47:07 -07003149 chan->ack_win = chan->tx_win;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003150}
3151
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003152static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003154 struct l2cap_conf_req *req = data;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003155 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003156 void *ptr = req->data;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003157 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003158
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003159 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003160
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003161 if (chan->num_conf_req || chan->num_conf_rsp)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003162 goto done;
3163
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003164 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003165 case L2CAP_MODE_STREAMING:
3166 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003167 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003168 break;
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003169
Marcel Holtmann848566b2013-10-01 22:59:22 -07003170 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003171 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3172
Gustavo F. Padovan2ba13ed2010-06-09 16:39:05 -03003173 /* fall through */
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003174 default:
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003175 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003176 break;
3177 }
3178
3179done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003180 if (chan->imtu != L2CAP_DEFAULT_MTU)
3181 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovan7990681c2011-01-24 16:01:43 -02003182
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003183 switch (chan->mode) {
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003184 case L2CAP_MODE_BASIC:
Marcel Holtmann6fea7ad2014-07-09 11:53:35 +02003185 if (disable_ertm)
3186 break;
3187
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003188 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003189 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003190 break;
3191
Gustavo F. Padovan62547752010-06-08 20:05:31 -03003192 rfc.mode = L2CAP_MODE_BASIC;
3193 rfc.txwin_size = 0;
3194 rfc.max_transmit = 0;
3195 rfc.retrans_timeout = 0;
3196 rfc.monitor_timeout = 0;
3197 rfc.max_pdu_size = 0;
3198
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003199 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003200 (unsigned long) &rfc);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003201 break;
3202
3203 case L2CAP_MODE_ERTM:
3204 rfc.mode = L2CAP_MODE_ERTM;
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003205 rfc.max_transmit = chan->max_tx;
Mat Martineau36c86c82012-10-23 15:24:20 -07003206
3207 __l2cap_set_ertm_timeouts(chan, &rfc);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003208
3209 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003210 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3211 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003212 rfc.max_pdu_size = cpu_to_le16(size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003213
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003214 l2cap_txwin_setup(chan);
3215
3216 rfc.txwin_size = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003217 L2CAP_DEFAULT_TX_WINDOW);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003218
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003219 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003220 (unsigned long) &rfc);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003221
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003222 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3223 l2cap_add_opt_efs(&ptr, chan);
3224
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003225 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3226 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003227 chan->tx_win);
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003228
3229 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3230 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003231 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003232 chan->fcs = L2CAP_FCS_NONE;
3233 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3234 chan->fcs);
3235 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003236 break;
3237
3238 case L2CAP_MODE_STREAMING:
Mat Martineau273759e2012-05-17 20:53:53 -07003239 l2cap_txwin_setup(chan);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003240 rfc.mode = L2CAP_MODE_STREAMING;
3241 rfc.txwin_size = 0;
3242 rfc.max_transmit = 0;
3243 rfc.retrans_timeout = 0;
3244 rfc.monitor_timeout = 0;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003245
3246 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003247 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3248 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003249 rfc.max_pdu_size = cpu_to_le16(size);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003250
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003251 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003252 (unsigned long) &rfc);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003253
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003254 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3255 l2cap_add_opt_efs(&ptr, chan);
3256
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003257 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3258 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003259 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003260 chan->fcs = L2CAP_FCS_NONE;
3261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3262 chan->fcs);
3263 }
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003264 break;
3265 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003266
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003267 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003268 req->flags = cpu_to_le16(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003269
3270 return ptr - data;
3271}
3272
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003273static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003274{
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003275 struct l2cap_conf_rsp *rsp = data;
3276 void *ptr = rsp->data;
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003277 void *req = chan->conf_req;
3278 int len = chan->conf_len;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003279 int type, hint, olen;
3280 unsigned long val;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003281 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003282 struct l2cap_conf_efs efs;
3283 u8 remote_efs = 0;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003284 u16 mtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003285 u16 result = L2CAP_CONF_SUCCESS;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003286 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003287
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003288 BT_DBG("chan %p", chan);
Marcel Holtmann820ae1b2006-11-18 22:15:00 +01003289
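	/* Walk the peer's option list. Hint options we do not handle are
	 * silently skipped; unknown non-hint options are echoed back in the
	 * response with the result set to L2CAP_CONF_UNKNOWN.
	 */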
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003290 while (len >= L2CAP_CONF_OPT_SIZE) {
3291 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003292
Gustavo F. Padovan589d2742009-04-20 01:31:07 -03003293 hint = type & L2CAP_CONF_HINT;
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07003294 type &= L2CAP_CONF_MASK;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003295
3296 switch (type) {
3297 case L2CAP_CONF_MTU:
Marcel Holtmann861d6882007-10-20 13:37:06 +02003298 mtu = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003299 break;
3300
3301 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003302 chan->flush_to = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003303 break;
3304
3305 case L2CAP_CONF_QOS:
3306 break;
3307
Marcel Holtmann6464f352007-10-20 13:39:51 +02003308 case L2CAP_CONF_RFC:
3309 if (olen == sizeof(rfc))
3310 memcpy(&rfc, (void *) val, olen);
3311 break;
3312
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003313 case L2CAP_CONF_FCS:
3314 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003315 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003316 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003317
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003318 case L2CAP_CONF_EFS:
3319 remote_efs = 1;
3320 if (olen == sizeof(efs))
3321 memcpy(&efs, (void *) val, olen);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003322 break;
3323
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003324 case L2CAP_CONF_EWS:
Marcel Holtmann848566b2013-10-01 22:59:22 -07003325 if (!chan->conn->hs_enabled)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003326 return -ECONNREFUSED;
3327
3328 set_bit(FLAG_EXT_CTRL, &chan->flags);
3329 set_bit(CONF_EWS_RECV, &chan->conf_state);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003330 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003331 chan->remote_tx_win = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003332 break;
3333
3334 default:
3335 if (hint)
3336 break;
3337
3338 result = L2CAP_CONF_UNKNOWN;
3339 *((u8 *) ptr++) = type;
3340 break;
3341 }
3342 }
3343
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003344 if (chan->num_conf_rsp || chan->num_conf_req > 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003345 goto done;
3346
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003347 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003348 case L2CAP_MODE_STREAMING:
3349 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003350 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003351 chan->mode = l2cap_select_mode(rfc.mode,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003352 chan->conn->feat_mask);
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003353 break;
3354 }
3355
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003356 if (remote_efs) {
Marcel Holtmann848566b2013-10-01 22:59:22 -07003357 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003358 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3359 else
3360 return -ECONNREFUSED;
3361 }
3362
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003363 if (chan->mode != rfc.mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003364 return -ECONNREFUSED;
Gustavo F. Padovan742e5192010-06-08 19:09:48 -03003365
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003366 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003367 }
3368
3369done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003370 if (chan->mode != rfc.mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003371 result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003372 rfc.mode = chan->mode;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003373
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003374 if (chan->num_conf_rsp == 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003375 return -ECONNREFUSED;
3376
Gustavo Padovan2d792812012-10-06 10:07:01 +01003377 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3378 (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003379 }
3380
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003381 if (result == L2CAP_CONF_SUCCESS) {
3382 /* Configure output options and let the other side know
3383 * which ones we don't like. */
3384
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003385 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3386 result = L2CAP_CONF_UNACCEPT;
3387 else {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003388 chan->omtu = mtu;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003389 set_bit(CONF_MTU_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003390 }
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003391 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003392
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003393 if (remote_efs) {
3394 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003395 efs.stype != L2CAP_SERV_NOTRAFIC &&
3396 efs.stype != chan->local_stype) {
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003397
3398 result = L2CAP_CONF_UNACCEPT;
3399
3400 if (chan->num_conf_req >= 1)
3401 return -ECONNREFUSED;
3402
3403 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003404 sizeof(efs),
3405 (unsigned long) &efs);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003406 } else {
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003407 /* Send PENDING Conf Rsp */
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003408 result = L2CAP_CONF_PENDING;
3409 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003410 }
3411 }
3412
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003413 switch (rfc.mode) {
3414 case L2CAP_MODE_BASIC:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003415 chan->fcs = L2CAP_FCS_NONE;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003416 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003417 break;
3418
3419 case L2CAP_MODE_ERTM:
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003420 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3421 chan->remote_tx_win = rfc.txwin_size;
3422 else
3423 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3424
Gustavo F. Padovan2c03a7a2011-03-25 20:15:28 -03003425 chan->remote_max_tx = rfc.max_transmit;
Mat Martineau86b1b262010-08-05 15:54:22 -07003426
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003427 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003428 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3429 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003430 rfc.max_pdu_size = cpu_to_le16(size);
3431 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003432
Mat Martineau36c86c82012-10-23 15:24:20 -07003433 __l2cap_set_ertm_timeouts(chan, &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003434
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003435 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003436
3437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003438 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003439
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003440 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3441 chan->remote_id = efs.id;
3442 chan->remote_stype = efs.stype;
3443 chan->remote_msdu = le16_to_cpu(efs.msdu);
3444 chan->remote_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003445 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003446 chan->remote_acc_lat =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003447 le32_to_cpu(efs.acc_lat);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003448 chan->remote_sdu_itime =
3449 le32_to_cpu(efs.sdu_itime);
3450 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003451 sizeof(efs),
3452 (unsigned long) &efs);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003453 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003454 break;
3455
3456 case L2CAP_MODE_STREAMING:
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003457 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003458 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3459 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003460 rfc.max_pdu_size = cpu_to_le16(size);
3461 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003462
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003463 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003464
Gustavo Padovan2d792812012-10-06 10:07:01 +01003465 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3466 (unsigned long) &rfc);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003467
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003468 break;
3469
3470 default:
Marcel Holtmann6464f352007-10-20 13:39:51 +02003471 result = L2CAP_CONF_UNACCEPT;
3472
3473 memset(&rfc, 0, sizeof(rfc));
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003474 rfc.mode = chan->mode;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003475 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003476
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003477 if (result == L2CAP_CONF_SUCCESS)
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003478 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003479 }
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003480 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003481 rsp->result = cpu_to_le16(result);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003482 rsp->flags = cpu_to_le16(0);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003483
3484 return ptr - data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003485}
3486
Gustavo Padovan2d792812012-10-06 10:07:01 +01003487static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3488 void *data, u16 *result)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003489{
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003490 struct l2cap_conf_req *req = data;
3491 void *ptr = req->data;
3492 int type, olen;
3493 unsigned long val;
Mat Martineau36e999a2011-12-08 17:23:21 -08003494 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003495 struct l2cap_conf_efs efs;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003496
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003497 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003498
3499 while (len >= L2CAP_CONF_OPT_SIZE) {
3500 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3501
3502 switch (type) {
3503 case L2CAP_CONF_MTU:
3504 if (val < L2CAP_DEFAULT_MIN_MTU) {
3505 *result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003506 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003507 } else
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003508 chan->imtu = val;
3509 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003510 break;
3511
3512 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003513 chan->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003514 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003515 2, chan->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003516 break;
3517
3518 case L2CAP_CONF_RFC:
3519 if (olen == sizeof(rfc))
3520 memcpy(&rfc, (void *)val, olen);
3521
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003522 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003523 rfc.mode != chan->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003524 return -ECONNREFUSED;
3525
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003526 chan->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003527
3528 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003529 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003530 break;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003531
3532 case L2CAP_CONF_EWS:
Mat Martineauc20f8e32012-07-10 05:47:07 -07003533 chan->ack_win = min_t(u16, val, chan->ack_win);
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003534 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Mat Martineauc20f8e32012-07-10 05:47:07 -07003535 chan->tx_win);
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003536 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003537
3538 case L2CAP_CONF_EFS:
3539 if (olen == sizeof(efs))
3540 memcpy(&efs, (void *)val, olen);
3541
3542 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003543 efs.stype != L2CAP_SERV_NOTRAFIC &&
3544 efs.stype != chan->local_stype)
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003545 return -ECONNREFUSED;
3546
Gustavo Padovan2d792812012-10-06 10:07:01 +01003547 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3548 (unsigned long) &efs);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003549 break;
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003550
3551 case L2CAP_CONF_FCS:
3552 if (*result == L2CAP_CONF_PENDING)
3553 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003554 set_bit(CONF_RECV_NO_FCS,
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003555 &chan->conf_state);
3556 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003557 }
3558 }
3559
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003560 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003561 return -ECONNREFUSED;
3562
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003563 chan->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003564
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003565 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003566 switch (rfc.mode) {
3567 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003568 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3569 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3570 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003571 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3572 chan->ack_win = min_t(u16, chan->ack_win,
3573 rfc.txwin_size);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003574
3575 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3576 chan->local_msdu = le16_to_cpu(efs.msdu);
3577 chan->local_sdu_itime =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003578 le32_to_cpu(efs.sdu_itime);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003579 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3580 chan->local_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003581 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003582 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003583 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003584
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003585 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003586 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003587 }
3588 }
3589
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003590 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003591 req->flags = cpu_to_le16(0);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003592
3593 return ptr - data;
3594}
3595
Gustavo Padovan2d792812012-10-06 10:07:01 +01003596static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3597 u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003598{
3599 struct l2cap_conf_rsp *rsp = data;
3600 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003601
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003602 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003603
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003604 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003605 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003606 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003607
3608 return ptr - data;
3609}
3610
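/* Send the deferred LE credit-based connection response, advertising our
 * MTU, MPS and the number of credits initially granted to the remote side.
 */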
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003611void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3612{
3613 struct l2cap_le_conn_rsp rsp;
3614 struct l2cap_conn *conn = chan->conn;
3615
3616 BT_DBG("chan %p", chan);
3617
3618 rsp.dcid = cpu_to_le16(chan->scid);
3619 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02003620 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03003621 rsp.credits = cpu_to_le16(chan->rx_credits);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003622 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003623
3624 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3625 &rsp);
3626}
3627
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003628void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003629{
3630 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003631 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003632 u8 buf[128];
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003633 u8 rsp_code;
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003634
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003635 rsp.scid = cpu_to_le16(chan->dcid);
3636 rsp.dcid = cpu_to_le16(chan->scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003637 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3638 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003639
3640 if (chan->hs_hcon)
3641 rsp_code = L2CAP_CREATE_CHAN_RSP;
3642 else
3643 rsp_code = L2CAP_CONN_RSP;
3644
3645 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3646
3647 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003648
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003649 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003650 return;
3651
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003652 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003653 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003654 chan->num_conf_req++;
3655}
3656
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003657static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003658{
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003659 int type, olen;
3660 unsigned long val;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003661 /* Use sane default values in case a misbehaving remote device
3662 * did not send an RFC or extended window size option.
3663 */
3664 u16 txwin_ext = chan->ack_win;
3665 struct l2cap_conf_rfc rfc = {
3666 .mode = chan->mode,
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003667 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3668 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
Mat Martineauc20f8e32012-07-10 05:47:07 -07003669 .max_pdu_size = cpu_to_le16(chan->imtu),
3670 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3671 };
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003672
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003673 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003674
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003675 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003676 return;
3677
3678 while (len >= L2CAP_CONF_OPT_SIZE) {
3679 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3680
Mat Martineauc20f8e32012-07-10 05:47:07 -07003681 switch (type) {
3682 case L2CAP_CONF_RFC:
3683 if (olen == sizeof(rfc))
3684 memcpy(&rfc, (void *)val, olen);
Szymon Janc8f321f82012-06-08 11:33:33 +02003685 break;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003686 case L2CAP_CONF_EWS:
3687 txwin_ext = val;
3688 break;
3689 }
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003690 }
3691
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003692 switch (rfc.mode) {
3693 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003694 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3695 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003696 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3697 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3698 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3699 else
3700 chan->ack_win = min_t(u16, chan->ack_win,
3701 rfc.txwin_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003702 break;
3703 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003704 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003705 }
3706}
3707
Gustavo Padovan2d792812012-10-06 10:07:01 +01003708static inline int l2cap_command_rej(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003709 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3710 u8 *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003711{
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003712 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003713
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003714 if (cmd_len < sizeof(*rej))
3715 return -EPROTO;
3716
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003717 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003718 return 0;
3719
3720 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003721 cmd->ident == conn->info_ident) {
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02003722 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003723
3724 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003725 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003726
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003727 l2cap_conn_start(conn);
3728 }
3729
3730 return 0;
3731}
3732
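/* Illustrative sketch (not used by the core): l2cap_command_rej() above
 * only handles the "command not understood" reject, which carries a
 * single reason field.  Building and sending one would look roughly
 * like this hypothetical helper.
 */
static inline void l2cap_example_send_cmd_rej_unk(struct l2cap_conn *conn,
						  u8 ident)
{
	struct l2cap_cmd_rej_unk rej;

	rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);

	l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
}
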
Mat Martineau17009152012-10-23 15:24:07 -07003733static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3734 struct l2cap_cmd_hdr *cmd,
3735 u8 *data, u8 rsp_code, u8 amp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003736{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3738 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003739 struct l2cap_chan *chan = NULL, *pchan;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003740 int result, status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003741
3742 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003743 __le16 psm = req->psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744
Andrei Emeltchenko097db762012-03-09 14:16:17 +02003745 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003746
 3747	/* Check if we have a socket listening on this PSM */
Marcel Holtmann6f59b902013-10-13 05:24:01 -07003748 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03003749 &conn->hcon->dst, ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003750 if (!pchan) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003751 result = L2CAP_CR_BAD_PSM;
3752 goto sendresp;
3753 }
3754
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003755 mutex_lock(&conn->chan_lock);
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003756 l2cap_chan_lock(pchan);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00003757
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003758 /* Check if the ACL is secure enough (if not SDP) */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003759 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003760 !hci_conn_check_link_mode(conn->hcon)) {
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02003761 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003762 result = L2CAP_CR_SEC_BLOCK;
3763 goto response;
3764 }
3765
Linus Torvalds1da177e2005-04-16 15:20:36 -07003766 result = L2CAP_CR_NO_MEM;
3767
Gustavo Padovan2dfa1002012-05-27 22:27:58 -03003768 /* Check if we already have channel with that dcid */
3769 if (__l2cap_get_chan_by_dcid(conn, scid))
3770 goto response;
3771
Gustavo Padovan80b98022012-05-27 22:27:51 -03003772 chan = pchan->ops->new_connection(pchan);
Gustavo F. Padovan80808e42011-05-16 17:24:37 -03003773 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003774 goto response;
3775
Syam Sidhardhan330b6c12013-08-06 01:59:12 +09003776	/* For certain devices (e.g. a HID mouse), support for authentication,
 3777	 * pairing and bonding is optional. For such devices, in order to avoid
 3778	 * keeping the ACL alive for too long after L2CAP disconnection, reset
 3779	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3780 */
3781 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3782
Marcel Holtmann7eafc592013-10-13 08:12:47 -07003783 bacpy(&chan->src, &conn->hcon->src);
3784 bacpy(&chan->dst, &conn->hcon->dst);
Marcel Holtmann4f1654e2013-10-13 08:50:41 -07003785 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3786 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003787 chan->psm = psm;
3788 chan->dcid = scid;
Mat Martineau17009152012-10-23 15:24:07 -07003789 chan->local_amp_id = amp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003790
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003791 __l2cap_chan_add(conn, chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003792
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003793 dcid = chan->scid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003794
Gustavo Padovan8d836d72013-10-15 19:24:47 -03003795 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003796
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003797 chan->ident = cmd->ident;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003798
Marcel Holtmann984947d2009-02-06 23:35:19 +01003799 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03003800 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07003801 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003802 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003803 result = L2CAP_CR_PEND;
3804 status = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08003805 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003806 } else {
Mat Martineau17009152012-10-23 15:24:07 -07003807 /* Force pending result for AMP controllers.
3808 * The connection will succeed after the
3809 * physical link is up.
3810 */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003811 if (amp_id == AMP_ID_BREDR) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003812 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau17009152012-10-23 15:24:07 -07003813 result = L2CAP_CR_SUCCESS;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003814 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003815 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003816 result = L2CAP_CR_PEND;
Mat Martineau17009152012-10-23 15:24:07 -07003817 }
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003818 status = L2CAP_CS_NO_INFO;
3819 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003820 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003821 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003822 result = L2CAP_CR_PEND;
3823 status = L2CAP_CS_AUTHEN_PEND;
3824 }
3825 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003826 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003827 result = L2CAP_CR_PEND;
3828 status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829 }
3830
Linus Torvalds1da177e2005-04-16 15:20:36 -07003831response:
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003832 l2cap_chan_unlock(pchan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003833 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03003834 l2cap_chan_put(pchan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835
3836sendresp:
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003837 rsp.scid = cpu_to_le16(scid);
3838 rsp.dcid = cpu_to_le16(dcid);
3839 rsp.result = cpu_to_le16(result);
3840 rsp.status = cpu_to_le16(status);
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003841 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003842
3843 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3844 struct l2cap_info_req info;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003845 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003846
3847 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3848 conn->info_ident = l2cap_get_ident(conn);
3849
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08003850 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003851
Gustavo Padovan2d792812012-10-06 10:07:01 +01003852 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3853 sizeof(info), &info);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003854 }
3855
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003856 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003857 result == L2CAP_CR_SUCCESS) {
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003858 u8 buf[128];
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003859 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003860 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003861 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003862 chan->num_conf_req++;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003863 }
Mat Martineau17009152012-10-23 15:24:07 -07003864
3865 return chan;
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003866}
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003867
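/* Illustrative sketch (not used by the core): whichever path l2cap_connect()
 * takes, the sendresp: label always answers with the requester's scid echoed
 * back, the locally allocated dcid (0 on failure) and a result/status pair.
 * The packing boils down to this hypothetical helper.
 */
static inline void l2cap_example_fill_conn_rsp(struct l2cap_conn_rsp *rsp,
					       u16 scid, u16 dcid,
					       u16 result, u16 status)
{
	rsp->scid = cpu_to_le16(scid);		/* CID chosen by the remote side */
	rsp->dcid = cpu_to_le16(dcid);		/* CID allocated locally, 0 on error */
	rsp->result = cpu_to_le16(result);	/* L2CAP_CR_* code */
	rsp->status = cpu_to_le16(status);	/* L2CAP_CS_* detail for pending results */
}
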
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003868static int l2cap_connect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003869 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003870{
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303871 struct hci_dev *hdev = conn->hcon->hdev;
3872 struct hci_conn *hcon = conn->hcon;
3873
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003874 if (cmd_len < sizeof(struct l2cap_conn_req))
3875 return -EPROTO;
3876
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303877 hci_dev_lock(hdev);
3878 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3879 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
Alfonso Acosta48ec92f2014-10-07 08:44:10 +00003880 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303881 hci_dev_unlock(hdev);
3882
Gustavo Padovan300229f2012-10-12 19:40:40 +08003883 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003884 return 0;
3885}
3886
Mat Martineau5909cf32012-10-23 15:24:08 -07003887static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003888 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3889 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003890{
3891 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3892 u16 scid, dcid, result, status;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003893 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003894 u8 req[128];
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003895 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003896
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003897 if (cmd_len < sizeof(*rsp))
3898 return -EPROTO;
3899
Linus Torvalds1da177e2005-04-16 15:20:36 -07003900 scid = __le16_to_cpu(rsp->scid);
3901 dcid = __le16_to_cpu(rsp->dcid);
3902 result = __le16_to_cpu(rsp->result);
3903 status = __le16_to_cpu(rsp->status);
3904
Andrei Emeltchenko1b009c92012-02-21 12:54:54 +02003905 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01003906 dcid, scid, result, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003907
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003908 mutex_lock(&conn->chan_lock);
3909
Linus Torvalds1da177e2005-04-16 15:20:36 -07003910 if (scid) {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003911 chan = __l2cap_get_chan_by_scid(conn, scid);
3912 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003913 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003914 goto unlock;
3915 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003916 } else {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003917 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3918 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003919 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003920 goto unlock;
3921 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003922 }
3923
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003924 err = 0;
3925
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003926 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003927
Linus Torvalds1da177e2005-04-16 15:20:36 -07003928 switch (result) {
3929 case L2CAP_CR_SUCCESS:
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03003930 l2cap_state_change(chan, BT_CONFIG);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003931 chan->ident = 0;
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003932 chan->dcid = dcid;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003933 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01003934
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003935 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003936 break;
3937
Linus Torvalds1da177e2005-04-16 15:20:36 -07003938 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003939 l2cap_build_conf_req(chan, req), req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003940 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003941 break;
3942
3943 case L2CAP_CR_PEND:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003944 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003945 break;
3946
3947 default:
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003948 l2cap_chan_del(chan, ECONNREFUSED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003949 break;
3950 }
3951
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003952 l2cap_chan_unlock(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003953
3954unlock:
3955 mutex_unlock(&conn->chan_lock);
3956
3957 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003958}
3959
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003960static inline void set_default_fcs(struct l2cap_chan *chan)
Mat Martineau8c462b62010-08-24 15:35:42 -07003961{
3962 /* FCS is enabled only in ERTM or streaming mode, if one or both
3963 * sides request it.
3964 */
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003965 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003966 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003967 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003968 chan->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07003969}
3970
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03003971static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3972 u8 ident, u16 flags)
3973{
3974 struct l2cap_conn *conn = chan->conn;
3975
3976 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3977 flags);
3978
3979 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3980 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3981
3982 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3983 l2cap_build_conf_rsp(chan, data,
3984 L2CAP_CONF_SUCCESS, flags), data);
3985}
3986
Johan Hedberg662d6522013-10-16 11:20:47 +03003987static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3988 u16 scid, u16 dcid)
3989{
3990 struct l2cap_cmd_rej_cid rej;
3991
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003992 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
Johan Hedberg662d6522013-10-16 11:20:47 +03003993 rej.scid = __cpu_to_le16(scid);
3994 rej.dcid = __cpu_to_le16(dcid);
3995
3996 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3997}
3998
Gustavo Padovan2d792812012-10-06 10:07:01 +01003999static inline int l2cap_config_req(struct l2cap_conn *conn,
4000 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4001 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004002{
4003 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4004 u16 dcid, flags;
4005 u8 rsp[64];
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004006 struct l2cap_chan *chan;
Mat Martineau3c588192012-04-11 10:48:42 -07004007 int len, err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004008
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004009 if (cmd_len < sizeof(*req))
4010 return -EPROTO;
4011
Linus Torvalds1da177e2005-04-16 15:20:36 -07004012 dcid = __le16_to_cpu(req->dcid);
4013 flags = __le16_to_cpu(req->flags);
4014
4015 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4016
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004017 chan = l2cap_get_chan_by_scid(conn, dcid);
Johan Hedberg662d6522013-10-16 11:20:47 +03004018 if (!chan) {
4019 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4020 return 0;
4021 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004022
David S. Miller033b1142011-07-21 13:38:42 -07004023 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
Johan Hedberg662d6522013-10-16 11:20:47 +03004024 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4025 chan->dcid);
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004026 goto unlock;
Gustavo F. Padovandf6bd742010-06-14 02:26:15 -03004027 }
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004028
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004029 /* Reject if config buffer is too small. */
Al Viro88219a02007-07-29 00:17:25 -07004030 len = cmd_len - sizeof(*req);
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004031 if (chan->conf_len + len > sizeof(chan->conf_req)) {
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004032 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004033 l2cap_build_conf_rsp(chan, rsp,
4034 L2CAP_CONF_REJECT, flags), rsp);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004035 goto unlock;
4036 }
4037
4038 /* Store config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004039 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4040 chan->conf_len += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004041
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004042 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043 /* Incomplete config. Send empty response. */
4044 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004045 l2cap_build_conf_rsp(chan, rsp,
4046 L2CAP_CONF_SUCCESS, flags), rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004047 goto unlock;
4048 }
4049
4050 /* Complete config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004051 len = l2cap_parse_conf_req(chan, rsp);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004052 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004053 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004054 goto unlock;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004055 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056
Mat Martineau1500109b2012-10-23 15:24:15 -07004057 chan->ident = cmd->ident;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004058 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004059 chan->num_conf_rsp++;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004060
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004061 /* Reset config buffer. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004062 chan->conf_len = 0;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004063
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004064 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
Marcel Holtmann876d9482007-10-20 13:35:42 +02004065 goto unlock;
4066
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004067 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004068 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004069
Mat Martineau105bdf92012-04-27 16:50:48 -07004070 if (chan->mode == L2CAP_MODE_ERTM ||
4071 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004072 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004073
Mat Martineau3c588192012-04-11 10:48:42 -07004074 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004075 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004076 else
4077 l2cap_chan_ready(chan);
4078
Marcel Holtmann876d9482007-10-20 13:35:42 +02004079 goto unlock;
4080 }
4081
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004082 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004083 u8 buf[64];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004085 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004086 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004087 }
4088
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004089	/* Got a Conf Rsp with the PENDING result from the remote side and
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004090	 * assume we already sent a Conf Rsp PENDING in the code above. */
4091 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03004092 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004093
4094 /* check compatibility */
4095
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004096 /* Send rsp for BR/EDR channel */
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004097 if (!chan->hs_hcon)
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004098 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4099 else
4100 chan->ident = cmd->ident;
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004101 }
4102
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103unlock:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004104 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004105 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106}
4107
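/* Illustrative sketch (not used by the core): the empty responses sent above
 * for continuation fragments come from l2cap_build_conf_rsp(); with no
 * options to append, the header it writes boils down to roughly this
 * hypothetical helper.
 */
static inline int l2cap_example_empty_conf_rsp(struct l2cap_chan *chan,
					       void *data, u16 flags)
{
	struct l2cap_conf_rsp *rsp = data;

	rsp->scid = cpu_to_le16(chan->dcid);	/* config applies to the remote CID */
	rsp->flags = cpu_to_le16(flags);	/* echo the continuation flag */
	rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);

	return sizeof(*rsp);			/* length passed to l2cap_send_cmd() */
}
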
Gustavo Padovan2d792812012-10-06 10:07:01 +01004108static inline int l2cap_config_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004109 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4110 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004111{
4112 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4113 u16 scid, flags, result;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004114 struct l2cap_chan *chan;
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004115 int len = cmd_len - sizeof(*rsp);
Mat Martineau3c588192012-04-11 10:48:42 -07004116 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004117
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004118 if (cmd_len < sizeof(*rsp))
4119 return -EPROTO;
4120
Linus Torvalds1da177e2005-04-16 15:20:36 -07004121 scid = __le16_to_cpu(rsp->scid);
4122 flags = __le16_to_cpu(rsp->flags);
4123 result = __le16_to_cpu(rsp->result);
4124
Andrei Emeltchenko61386cb2012-03-12 12:13:07 +02004125 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4126 result, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004128 chan = l2cap_get_chan_by_scid(conn, scid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004129 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004130 return 0;
4131
4132 switch (result) {
4133 case L2CAP_CONF_SUCCESS:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004134 l2cap_conf_rfc_get(chan, rsp->data, len);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004135 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004136 break;
4137
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004138 case L2CAP_CONF_PENDING:
4139 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4140
4141 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4142 char buf[64];
4143
4144 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004145 buf, &result);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004146 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004147 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004148 goto done;
4149 }
4150
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004151 if (!chan->hs_hcon) {
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004152 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4153 0);
Andrei Emeltchenko5ce66b52012-10-31 15:46:30 +02004154 } else {
4155 if (l2cap_check_efs(chan)) {
4156 amp_create_logical_link(chan);
4157 chan->ident = cmd->ident;
4158 }
4159 }
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004160 }
4161 goto done;
4162
Linus Torvalds1da177e2005-04-16 15:20:36 -07004163 case L2CAP_CONF_UNACCEPT:
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004164 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004165 char req[64];
4166
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004167 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004168 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004169 goto done;
4170 }
4171
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004172 /* throw out any old stored conf requests */
4173 result = L2CAP_CONF_SUCCESS;
Gustavo F. Padovanb4450032011-04-12 18:15:09 -03004174 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004175 req, &result);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004176 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004177 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004178 goto done;
4179 }
4180
4181 l2cap_send_cmd(conn, l2cap_get_ident(conn),
Gustavo Padovan2d792812012-10-06 10:07:01 +01004182 L2CAP_CONF_REQ, len, req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004183 chan->num_conf_req++;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004184 if (result != L2CAP_CONF_SUCCESS)
4185 goto done;
4186 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187 }
4188
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004189 default:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004190 l2cap_chan_set_err(chan, ECONNRESET);
Andrei Emeltchenko2e0052e2012-02-21 12:54:58 +02004191
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08004192 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004193 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004194 goto done;
4195 }
4196
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004197 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004198 goto done;
4199
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004200 set_bit(CONF_INPUT_DONE, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004201
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004202 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004203 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004204
Mat Martineau105bdf92012-04-27 16:50:48 -07004205 if (chan->mode == L2CAP_MODE_ERTM ||
4206 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004207 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004208
Mat Martineau3c588192012-04-11 10:48:42 -07004209 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004210 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004211 else
4212 l2cap_chan_ready(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213 }
4214
4215done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004216 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004217 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218}
4219
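/* Illustrative sketch (not used by the core): both config handlers key off
 * the continuation bit - only a request or response with the bit cleared
 * completes that direction of the negotiation.  The check is a plain mask
 * test, shown here as a hypothetical helper.
 */
static inline bool l2cap_example_conf_is_final(u16 flags)
{
	return !(flags & L2CAP_CONF_FLAG_CONTINUATION);
}
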
Gustavo Padovan2d792812012-10-06 10:07:01 +01004220static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004221 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4222 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223{
4224 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4225 struct l2cap_disconn_rsp rsp;
4226 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004227 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004228
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004229 if (cmd_len != sizeof(*req))
4230 return -EPROTO;
4231
Linus Torvalds1da177e2005-04-16 15:20:36 -07004232 scid = __le16_to_cpu(req->scid);
4233 dcid = __le16_to_cpu(req->dcid);
4234
4235 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4236
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004237 mutex_lock(&conn->chan_lock);
4238
4239 chan = __l2cap_get_chan_by_scid(conn, dcid);
4240 if (!chan) {
4241 mutex_unlock(&conn->chan_lock);
Johan Hedberg662d6522013-10-16 11:20:47 +03004242 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4243 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004244 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004246 l2cap_chan_lock(chan);
4247
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03004248 rsp.dcid = cpu_to_le16(chan->scid);
4249 rsp.scid = cpu_to_le16(chan->dcid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004250 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4251
Gustavo Padovan5ec1bbe2013-10-15 19:24:48 -03004252 chan->ops->set_shutdown(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253
Mat Martineau61d6ef32012-04-27 16:50:50 -07004254 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004255 l2cap_chan_del(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004256
4257 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004258
Gustavo Padovan80b98022012-05-27 22:27:51 -03004259 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004260 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004261
4262 mutex_unlock(&conn->chan_lock);
4263
Linus Torvalds1da177e2005-04-16 15:20:36 -07004264 return 0;
4265}
4266
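/* Illustrative sketch (not used by the core): the request parsed above is
 * what the initiating side packs from its own channel state - dcid is the
 * CID on the receiving (remote) side, scid the CID on the sender's side.
 * The packing amounts to this hypothetical helper.
 */
static inline void l2cap_example_fill_disconn_req(struct l2cap_chan *chan,
						  struct l2cap_disconn_req *req)
{
	req->dcid = cpu_to_le16(chan->dcid);	/* peer's CID for this channel */
	req->scid = cpu_to_le16(chan->scid);	/* our CID for this channel */
}
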
Gustavo Padovan2d792812012-10-06 10:07:01 +01004267static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004268 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4269 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004270{
4271 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4272 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004273 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004274
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004275 if (cmd_len != sizeof(*rsp))
4276 return -EPROTO;
4277
Linus Torvalds1da177e2005-04-16 15:20:36 -07004278 scid = __le16_to_cpu(rsp->scid);
4279 dcid = __le16_to_cpu(rsp->dcid);
4280
4281 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4282
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004283 mutex_lock(&conn->chan_lock);
4284
4285 chan = __l2cap_get_chan_by_scid(conn, scid);
4286 if (!chan) {
4287 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004288 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004289 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004291 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004292
Mat Martineau61d6ef32012-04-27 16:50:50 -07004293 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004294 l2cap_chan_del(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004295
4296 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297
Gustavo Padovan80b98022012-05-27 22:27:51 -03004298 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004299 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004300
4301 mutex_unlock(&conn->chan_lock);
4302
Linus Torvalds1da177e2005-04-16 15:20:36 -07004303 return 0;
4304}
4305
Gustavo Padovan2d792812012-10-06 10:07:01 +01004306static inline int l2cap_information_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004307 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4308 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309{
4310 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 u16 type;
4312
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004313 if (cmd_len != sizeof(*req))
4314 return -EPROTO;
4315
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316 type = __le16_to_cpu(req->type);
4317
4318 BT_DBG("type 0x%4.4x", type);
4319
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004320 if (type == L2CAP_IT_FEAT_MASK) {
4321 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004322 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004323 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004324 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4325 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004326 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004327 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Gustavo Padovan2d792812012-10-06 10:07:01 +01004328 | L2CAP_FEAT_FCS;
Marcel Holtmann848566b2013-10-01 22:59:22 -07004329 if (conn->hs_enabled)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03004330 feat_mask |= L2CAP_FEAT_EXT_FLOW
Gustavo Padovan2d792812012-10-06 10:07:01 +01004331 | L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenkoa5fd6f32011-09-16 16:26:32 +03004332
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004333 put_unaligned_le32(feat_mask, rsp->data);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004334 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4335 buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004336 } else if (type == L2CAP_IT_FIXED_CHAN) {
4337 u8 buf[12];
4338 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Mat Martineau50a147c2011-11-02 16:18:34 -07004339
Marcel Holtmann848566b2013-10-01 22:59:22 -07004340 if (conn->hs_enabled)
Mat Martineau50a147c2011-11-02 16:18:34 -07004341 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4342 else
4343 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4344
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004345 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4346 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Andrei Emeltchenkoc6337ea2011-10-20 17:02:44 +03004347 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
Gustavo Padovan2d792812012-10-06 10:07:01 +01004348 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4349 buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004350 } else {
4351 struct l2cap_info_rsp rsp;
4352 rsp.type = cpu_to_le16(type);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004353 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004354 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4355 &rsp);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004356 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004357
4358 return 0;
4359}
4360
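/* Illustrative sketch (not used by the core): the feature-mask reply built
 * above is an 8 byte buffer - 2 bytes type, 2 bytes result, then a 32 bit
 * little-endian mask.  Filling such a buffer amounts to this hypothetical
 * helper (the caller must supply at least 8 bytes of storage).
 */
static inline void l2cap_example_fill_feat_mask_rsp(u8 *buf, u32 feat_mask)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;

	rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
	rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
	put_unaligned_le32(feat_mask, rsp->data);
}
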
Gustavo Padovan2d792812012-10-06 10:07:01 +01004361static inline int l2cap_information_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004362 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4363 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004364{
4365 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4366 u16 type, result;
4367
Jaganath Kanakkassery3f6fa3d2013-06-21 19:55:11 +05304368 if (cmd_len < sizeof(*rsp))
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004369 return -EPROTO;
4370
Linus Torvalds1da177e2005-04-16 15:20:36 -07004371 type = __le16_to_cpu(rsp->type);
4372 result = __le16_to_cpu(rsp->result);
4373
4374 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4375
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004376	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4377 if (cmd->ident != conn->info_ident ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01004378 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004379 return 0;
4380
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02004381 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004382
Ville Tervoadb08ed2010-08-04 09:43:33 +03004383 if (result != L2CAP_IR_SUCCESS) {
4384 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4385 conn->info_ident = 0;
4386
4387 l2cap_conn_start(conn);
4388
4389 return 0;
4390 }
4391
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004392 switch (type) {
4393 case L2CAP_IT_FEAT_MASK:
Harvey Harrison83985312008-05-02 16:25:46 -07004394 conn->feat_mask = get_unaligned_le32(rsp->data);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004395
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07004396 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004397 struct l2cap_info_req req;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004398 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004399
4400 conn->info_ident = l2cap_get_ident(conn);
4401
4402 l2cap_send_cmd(conn, conn->info_ident,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004403 L2CAP_INFO_REQ, sizeof(req), &req);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004404 } else {
4405 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4406 conn->info_ident = 0;
4407
4408 l2cap_conn_start(conn);
4409 }
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004410 break;
4411
4412 case L2CAP_IT_FIXED_CHAN:
4413 conn->fixed_chan_mask = rsp->data[0];
Marcel Holtmann984947d2009-02-06 23:35:19 +01004414 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004415 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004416
4417 l2cap_conn_start(conn);
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004418 break;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004419 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004420
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421 return 0;
4422}
4423
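/* Illustrative sketch (not used by the core): once stored, the peer's
 * feature mask is an ordinary 32 bit bitfield, so testing for a single
 * capability is a mask test like this hypothetical helper.
 */
static inline bool l2cap_example_peer_has_feat(struct l2cap_conn *conn,
					       u32 feat)
{
	return (conn->feat_mask & feat) != 0;
}
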
Mat Martineau17009152012-10-23 15:24:07 -07004424static int l2cap_create_channel_req(struct l2cap_conn *conn,
4425 struct l2cap_cmd_hdr *cmd,
4426 u16 cmd_len, void *data)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004427{
4428 struct l2cap_create_chan_req *req = data;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004429 struct l2cap_create_chan_rsp rsp;
Mat Martineau17009152012-10-23 15:24:07 -07004430 struct l2cap_chan *chan;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004431 struct hci_dev *hdev;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004432 u16 psm, scid;
4433
4434 if (cmd_len != sizeof(*req))
4435 return -EPROTO;
4436
Marcel Holtmann848566b2013-10-01 22:59:22 -07004437 if (!conn->hs_enabled)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004438 return -EINVAL;
4439
4440 psm = le16_to_cpu(req->psm);
4441 scid = le16_to_cpu(req->scid);
4442
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004443 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004444
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004445 /* For controller id 0 make BR/EDR connection */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004446 if (req->amp_id == AMP_ID_BREDR) {
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004447 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4448 req->amp_id);
4449 return 0;
4450 }
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004451
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004452 /* Validate AMP controller id */
4453 hdev = hci_dev_get(req->amp_id);
4454 if (!hdev)
4455 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004456
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004457 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
Mat Martineau17009152012-10-23 15:24:07 -07004458 hci_dev_put(hdev);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004459 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004460 }
4461
4462 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4463 req->amp_id);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004464 if (chan) {
4465 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4466 struct hci_conn *hs_hcon;
4467
Marcel Holtmann98e0f7e2013-10-13 02:23:40 -07004468 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4469 &conn->hcon->dst);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004470 if (!hs_hcon) {
4471 hci_dev_put(hdev);
Johan Hedberg662d6522013-10-16 11:20:47 +03004472 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4473 chan->dcid);
4474 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004475 }
4476
4477 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4478
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004479 mgr->bredr_chan = chan;
4480 chan->hs_hcon = hs_hcon;
Andrei Emeltchenkofd45bf42012-11-20 17:16:22 +02004481 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004482 conn->mtu = hdev->block_mtu;
4483 }
4484
4485 hci_dev_put(hdev);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004486
4487 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004488
4489error:
4490 rsp.dcid = 0;
4491 rsp.scid = cpu_to_le16(scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004492 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4493 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004494
4495 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4496 sizeof(rsp), &rsp);
4497
Johan Hedbergdc280802013-09-16 13:05:13 +03004498 return 0;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004499}
4500
Mat Martineau8eb200b2012-10-23 15:24:17 -07004501static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4502{
4503 struct l2cap_move_chan_req req;
4504 u8 ident;
4505
4506 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4507
4508 ident = l2cap_get_ident(chan->conn);
4509 chan->ident = ident;
4510
4511 req.icid = cpu_to_le16(chan->scid);
4512 req.dest_amp_id = dest_amp_id;
4513
4514 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4515 &req);
4516
4517 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4518}
4519
Mat Martineau1500109b2012-10-23 15:24:15 -07004520static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004521{
4522 struct l2cap_move_chan_rsp rsp;
4523
Mat Martineau1500109b2012-10-23 15:24:15 -07004524 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004525
Mat Martineau1500109b2012-10-23 15:24:15 -07004526 rsp.icid = cpu_to_le16(chan->dcid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004527 rsp.result = cpu_to_le16(result);
4528
Mat Martineau1500109b2012-10-23 15:24:15 -07004529 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4530 sizeof(rsp), &rsp);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004531}
4532
Mat Martineau5b155ef2012-10-23 15:24:14 -07004533static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004534{
4535 struct l2cap_move_chan_cfm cfm;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004536
Mat Martineau5b155ef2012-10-23 15:24:14 -07004537 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004538
Mat Martineau5b155ef2012-10-23 15:24:14 -07004539 chan->ident = l2cap_get_ident(chan->conn);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004540
Mat Martineau5b155ef2012-10-23 15:24:14 -07004541 cfm.icid = cpu_to_le16(chan->scid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004542 cfm.result = cpu_to_le16(result);
4543
Mat Martineau5b155ef2012-10-23 15:24:14 -07004544 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4545 sizeof(cfm), &cfm);
4546
4547 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4548}
4549
4550static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4551{
4552 struct l2cap_move_chan_cfm cfm;
4553
4554 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4555
4556 cfm.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004557 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
Mat Martineau5b155ef2012-10-23 15:24:14 -07004558
4559 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4560 sizeof(cfm), &cfm);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004561}
4562
4563static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004564 u16 icid)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004565{
4566 struct l2cap_move_chan_cfm_rsp rsp;
4567
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004568 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004569
4570 rsp.icid = cpu_to_le16(icid);
4571 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4572}
4573
Mat Martineau5f3847a2012-10-23 15:24:12 -07004574static void __release_logical_link(struct l2cap_chan *chan)
4575{
4576 chan->hs_hchan = NULL;
4577 chan->hs_hcon = NULL;
4578
4579 /* Placeholder - release the logical link */
4580}
4581
Mat Martineau1500109b2012-10-23 15:24:15 -07004582static void l2cap_logical_fail(struct l2cap_chan *chan)
4583{
4584 /* Logical link setup failed */
4585 if (chan->state != BT_CONNECTED) {
4586 /* Create channel failure, disconnect */
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004587 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineau1500109b2012-10-23 15:24:15 -07004588 return;
4589 }
4590
4591 switch (chan->move_role) {
4592 case L2CAP_MOVE_ROLE_RESPONDER:
4593 l2cap_move_done(chan);
4594 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4595 break;
4596 case L2CAP_MOVE_ROLE_INITIATOR:
4597 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4598 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4599 /* Remote has only sent pending or
4600 * success responses, clean up
4601 */
4602 l2cap_move_done(chan);
4603 }
4604
4605 /* Other amp move states imply that the move
4606 * has already aborted
4607 */
4608 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4609 break;
4610 }
4611}
4612
4613static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4614 struct hci_chan *hchan)
4615{
4616 struct l2cap_conf_rsp rsp;
Mat Martineau1500109b2012-10-23 15:24:15 -07004617
Andrei Emeltchenko336178a2012-10-31 15:46:27 +02004618 chan->hs_hchan = hchan;
Mat Martineau1500109b2012-10-23 15:24:15 -07004619 chan->hs_hcon->l2cap_data = chan->conn;
4620
Andrei Emeltchenko35ba9562012-10-25 15:20:43 +03004621 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
Mat Martineau1500109b2012-10-23 15:24:15 -07004622
4623 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Andrei Emeltchenkofe79c6f2012-10-25 15:20:42 +03004624 int err;
Mat Martineau1500109b2012-10-23 15:24:15 -07004625
4626 set_default_fcs(chan);
4627
4628 err = l2cap_ertm_init(chan);
4629 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004630 l2cap_send_disconn_req(chan, -err);
Mat Martineau1500109b2012-10-23 15:24:15 -07004631 else
4632 l2cap_chan_ready(chan);
4633 }
4634}
4635
4636static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4637 struct hci_chan *hchan)
4638{
4639 chan->hs_hcon = hchan->conn;
4640 chan->hs_hcon->l2cap_data = chan->conn;
4641
4642 BT_DBG("move_state %d", chan->move_state);
4643
4644 switch (chan->move_state) {
4645 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4646 /* Move confirm will be sent after a success
4647 * response is received
4648 */
4649 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4650 break;
4651 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4652 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4653 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4654 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4655 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4656 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4657 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4658 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4659 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4660 }
4661 break;
4662 default:
4663 /* Move was not in expected state, free the channel */
4664 __release_logical_link(chan);
4665
4666 chan->move_state = L2CAP_MOVE_STABLE;
4667 }
4668}
4669
4670/* Call with chan locked */
Andrei Emeltchenko27695fb2012-10-25 15:20:45 +03004671void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4672 u8 status)
Mat Martineau5b155ef2012-10-23 15:24:14 -07004673{
Mat Martineau1500109b2012-10-23 15:24:15 -07004674 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4675
4676 if (status) {
4677 l2cap_logical_fail(chan);
4678 __release_logical_link(chan);
4679 return;
4680 }
4681
4682 if (chan->state != BT_CONNECTED) {
4683 /* Ignore logical link if channel is on BR/EDR */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004684 if (chan->local_amp_id != AMP_ID_BREDR)
Mat Martineau1500109b2012-10-23 15:24:15 -07004685 l2cap_logical_finish_create(chan, hchan);
4686 } else {
4687 l2cap_logical_finish_move(chan, hchan);
4688 }
Mat Martineau5b155ef2012-10-23 15:24:14 -07004689}
4690
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004691void l2cap_move_start(struct l2cap_chan *chan)
4692{
4693 BT_DBG("chan %p", chan);
4694
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004695 if (chan->local_amp_id == AMP_ID_BREDR) {
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004696 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4697 return;
4698 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4699 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4700 /* Placeholder - start physical link setup */
4701 } else {
4702 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4703 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4704 chan->move_id = 0;
4705 l2cap_move_setup(chan);
4706 l2cap_send_move_chan_req(chan, 0);
4707 }
4708}
4709
Mat Martineau8eb200b2012-10-23 15:24:17 -07004710static void l2cap_do_create(struct l2cap_chan *chan, int result,
4711 u8 local_amp_id, u8 remote_amp_id)
4712{
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004713 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4714 local_amp_id, remote_amp_id);
4715
Andrei Emeltchenko12d6cc62012-11-14 17:39:31 +02004716 chan->fcs = L2CAP_FCS_NONE;
4717
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004718 /* Outgoing channel on AMP */
4719 if (chan->state == BT_CONNECT) {
4720 if (result == L2CAP_CR_SUCCESS) {
4721 chan->local_amp_id = local_amp_id;
4722 l2cap_send_create_chan_req(chan, remote_amp_id);
4723 } else {
4724 /* Revert to BR/EDR connect */
4725 l2cap_send_conn_req(chan);
4726 }
4727
4728 return;
4729 }
4730
4731 /* Incoming channel on AMP */
4732 if (__l2cap_no_conn_pending(chan)) {
Mat Martineau8eb200b2012-10-23 15:24:17 -07004733 struct l2cap_conn_rsp rsp;
4734 char buf[128];
4735 rsp.scid = cpu_to_le16(chan->dcid);
4736 rsp.dcid = cpu_to_le16(chan->scid);
4737
Mat Martineau8eb200b2012-10-23 15:24:17 -07004738 if (result == L2CAP_CR_SUCCESS) {
4739 /* Send successful response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004740 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4741 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004742 } else {
4743 /* Send negative response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004744 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4745 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004746 }
4747
4748 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4749 sizeof(rsp), &rsp);
4750
4751 if (result == L2CAP_CR_SUCCESS) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02004752 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004753 set_bit(CONF_REQ_SENT, &chan->conf_state);
4754 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4755 L2CAP_CONF_REQ,
4756 l2cap_build_conf_req(chan, buf), buf);
4757 chan->num_conf_req++;
4758 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004759 }
4760}
4761
4762static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4763 u8 remote_amp_id)
4764{
4765 l2cap_move_setup(chan);
4766 chan->move_id = local_amp_id;
4767 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4768
4769 l2cap_send_move_chan_req(chan, remote_amp_id);
4770}
4771
4772static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4773{
4774 struct hci_chan *hchan = NULL;
4775
4776 /* Placeholder - get hci_chan for logical link */
4777
4778 if (hchan) {
4779 if (hchan->state == BT_CONNECTED) {
4780 /* Logical link is ready to go */
4781 chan->hs_hcon = hchan->conn;
4782 chan->hs_hcon->l2cap_data = chan->conn;
4783 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4784 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4785
4786 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4787 } else {
4788 /* Wait for logical link to be ready */
4789 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4790 }
4791 } else {
4792 /* Logical link not available */
4793 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4794 }
4795}
4796
4797static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4798{
4799 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4800 u8 rsp_result;
4801 if (result == -EINVAL)
4802 rsp_result = L2CAP_MR_BAD_ID;
4803 else
4804 rsp_result = L2CAP_MR_NOT_ALLOWED;
4805
4806 l2cap_send_move_chan_rsp(chan, rsp_result);
4807 }
4808
4809 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4810 chan->move_state = L2CAP_MOVE_STABLE;
4811
4812 /* Restart data transmission */
4813 l2cap_ertm_send(chan);
4814}
4815
Andrei Emeltchenkoa514b172012-11-14 17:39:30 +02004816/* Invoke with locked chan */
4817void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
Mat Martineau8eb200b2012-10-23 15:24:17 -07004818{
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004819 u8 local_amp_id = chan->local_amp_id;
Andrei Emeltchenkofffadc02012-11-01 15:37:03 +02004820 u8 remote_amp_id = chan->remote_amp_id;
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004821
Mat Martineau8eb200b2012-10-23 15:24:17 -07004822 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4823 chan, result, local_amp_id, remote_amp_id);
4824
Mat Martineau8eb200b2012-10-23 15:24:17 -07004825 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4826 l2cap_chan_unlock(chan);
4827 return;
4828 }
4829
4830 if (chan->state != BT_CONNECTED) {
4831 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4832 } else if (result != L2CAP_MR_SUCCESS) {
4833 l2cap_do_move_cancel(chan, result);
4834 } else {
4835 switch (chan->move_role) {
4836 case L2CAP_MOVE_ROLE_INITIATOR:
4837 l2cap_do_move_initiate(chan, local_amp_id,
4838 remote_amp_id);
4839 break;
4840 case L2CAP_MOVE_ROLE_RESPONDER:
4841 l2cap_do_move_respond(chan, result);
4842 break;
4843 default:
4844 l2cap_do_move_cancel(chan, result);
4845 break;
4846 }
4847 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004848}
4849
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004850static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004851 struct l2cap_cmd_hdr *cmd,
4852 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004853{
4854 struct l2cap_move_chan_req *req = data;
Mat Martineau1500109b2012-10-23 15:24:15 -07004855 struct l2cap_move_chan_rsp rsp;
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004856 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004857 u16 icid = 0;
4858 u16 result = L2CAP_MR_NOT_ALLOWED;
4859
4860 if (cmd_len != sizeof(*req))
4861 return -EPROTO;
4862
4863 icid = le16_to_cpu(req->icid);
4864
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004865 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004866
Marcel Holtmann848566b2013-10-01 22:59:22 -07004867 if (!conn->hs_enabled)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004868 return -EINVAL;
4869
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004870 chan = l2cap_get_chan_by_dcid(conn, icid);
4871 if (!chan) {
Mat Martineau1500109b2012-10-23 15:24:15 -07004872 rsp.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004873 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
Mat Martineau1500109b2012-10-23 15:24:15 -07004874 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4875 sizeof(rsp), &rsp);
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004876 return 0;
4877 }
4878
Mat Martineau1500109b2012-10-23 15:24:15 -07004879 chan->ident = cmd->ident;
4880
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004881 if (chan->scid < L2CAP_CID_DYN_START ||
4882 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4883 (chan->mode != L2CAP_MODE_ERTM &&
4884 chan->mode != L2CAP_MODE_STREAMING)) {
4885 result = L2CAP_MR_NOT_ALLOWED;
4886 goto send_move_response;
4887 }
4888
4889 if (chan->local_amp_id == req->dest_amp_id) {
4890 result = L2CAP_MR_SAME_ID;
4891 goto send_move_response;
4892 }
4893
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004894 if (req->dest_amp_id != AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004895 struct hci_dev *hdev;
4896 hdev = hci_dev_get(req->dest_amp_id);
4897 if (!hdev || hdev->dev_type != HCI_AMP ||
4898 !test_bit(HCI_UP, &hdev->flags)) {
4899 if (hdev)
4900 hci_dev_put(hdev);
4901
4902 result = L2CAP_MR_BAD_ID;
4903 goto send_move_response;
4904 }
4905 hci_dev_put(hdev);
4906 }
4907
4908 /* Detect a move collision. Only send a collision response
4909	 * if this side has "lost"; otherwise proceed with the move.
4910 * The winner has the larger bd_addr.
4911 */
4912 if ((__chan_is_moving(chan) ||
4913 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
Marcel Holtmann6f59b902013-10-13 05:24:01 -07004914 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004915 result = L2CAP_MR_COLLISION;
4916 goto send_move_response;
4917 }
4918
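	/* The request is acceptable: take the responder role and prepare
	 * the channel for a move to the requested controller.
	 */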
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004919 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4920 l2cap_move_setup(chan);
4921 chan->move_id = req->dest_amp_id;
4922 icid = chan->dcid;
4923
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004924 if (req->dest_amp_id == AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004925 /* Moving to BR/EDR */
4926 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4927 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4928 result = L2CAP_MR_PEND;
4929 } else {
4930 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4931 result = L2CAP_MR_SUCCESS;
4932 }
4933 } else {
4934 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4935 /* Placeholder - uncomment when amp functions are available */
4936 /*amp_accept_physical(chan, req->dest_amp_id);*/
4937 result = L2CAP_MR_PEND;
4938 }
4939
4940send_move_response:
Mat Martineau1500109b2012-10-23 15:24:15 -07004941 l2cap_send_move_chan_rsp(chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004942
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004943 l2cap_chan_unlock(chan);
4944
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004945 return 0;
4946}
4947
Mat Martineau5b155ef2012-10-23 15:24:14 -07004948static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4949{
4950 struct l2cap_chan *chan;
4951 struct hci_chan *hchan = NULL;
4952
4953 chan = l2cap_get_chan_by_scid(conn, icid);
4954 if (!chan) {
4955 l2cap_send_move_chan_cfm_icid(conn, icid);
4956 return;
4957 }
4958
4959 __clear_chan_timer(chan);
4960 if (result == L2CAP_MR_PEND)
4961 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4962
4963 switch (chan->move_state) {
4964 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4965 /* Move confirm will be sent when logical link
4966 * is complete.
4967 */
4968 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4969 break;
4970 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4971 if (result == L2CAP_MR_PEND) {
4972 break;
4973 } else if (test_bit(CONN_LOCAL_BUSY,
4974 &chan->conn_state)) {
4975 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4976 } else {
4977 /* Logical link is up or moving to BR/EDR,
4978 * proceed with move
4979 */
4980 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4981 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4982 }
4983 break;
4984 case L2CAP_MOVE_WAIT_RSP:
4985 /* Moving to AMP */
4986 if (result == L2CAP_MR_SUCCESS) {
4987 /* Remote is ready, send confirm immediately
4988 * after logical link is ready
4989 */
4990 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4991 } else {
4992 /* Both logical link and move success
4993 * are required to confirm
4994 */
4995 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4996 }
4997
4998 /* Placeholder - get hci_chan for logical link */
4999 if (!hchan) {
5000 /* Logical link not available */
5001 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5002 break;
5003 }
5004
5005 /* If the logical link is not yet connected, do not
5006 * send confirmation.
5007 */
5008 if (hchan->state != BT_CONNECTED)
5009 break;
5010
5011 /* Logical link is already ready to go */
5012
5013 chan->hs_hcon = hchan->conn;
5014 chan->hs_hcon->l2cap_data = chan->conn;
5015
5016 if (result == L2CAP_MR_SUCCESS) {
5017 /* Can confirm now */
5018 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5019 } else {
5020 /* Now only need move success
5021 * to confirm
5022 */
5023 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5024 }
5025
5026 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5027 break;
5028 default:
5029 /* Any other amp move state means the move failed. */
5030 chan->move_id = chan->local_amp_id;
5031 l2cap_move_done(chan);
5032 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5033 }
5034
5035 l2cap_chan_unlock(chan);
5036}
5037
5038static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5039 u16 result)
5040{
5041 struct l2cap_chan *chan;
5042
5043 chan = l2cap_get_chan_by_ident(conn, ident);
5044 if (!chan) {
5045		/* Could not locate channel; icid is best guess */
5046 l2cap_send_move_chan_cfm_icid(conn, icid);
5047 return;
5048 }
5049
5050 __clear_chan_timer(chan);
5051
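	/* An initiator that lost a move collision falls back to the
	 * responder role; any other failure aborts the move and keeps
	 * the channel on its current controller.
	 */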
5052 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5053 if (result == L2CAP_MR_COLLISION) {
5054 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5055 } else {
5056 /* Cleanup - cancel move */
5057 chan->move_id = chan->local_amp_id;
5058 l2cap_move_done(chan);
5059 }
5060 }
5061
5062 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5063
5064 l2cap_chan_unlock(chan);
5065}
5066
5067static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5068 struct l2cap_cmd_hdr *cmd,
5069 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005070{
5071 struct l2cap_move_chan_rsp *rsp = data;
5072 u16 icid, result;
5073
5074 if (cmd_len != sizeof(*rsp))
5075 return -EPROTO;
5076
5077 icid = le16_to_cpu(rsp->icid);
5078 result = le16_to_cpu(rsp->result);
5079
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005080 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005081
Mat Martineau5b155ef2012-10-23 15:24:14 -07005082 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5083 l2cap_move_continue(conn, icid, result);
5084 else
5085 l2cap_move_fail(conn, cmd->ident, icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005086
5087 return 0;
5088}
5089
Mat Martineau5f3847a2012-10-23 15:24:12 -07005090static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5091 struct l2cap_cmd_hdr *cmd,
5092 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005093{
5094 struct l2cap_move_chan_cfm *cfm = data;
Mat Martineau5f3847a2012-10-23 15:24:12 -07005095 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005096 u16 icid, result;
5097
5098 if (cmd_len != sizeof(*cfm))
5099 return -EPROTO;
5100
5101 icid = le16_to_cpu(cfm->icid);
5102 result = le16_to_cpu(cfm->result);
5103
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005104 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005105
Mat Martineau5f3847a2012-10-23 15:24:12 -07005106 chan = l2cap_get_chan_by_dcid(conn, icid);
5107 if (!chan) {
5108 /* Spec requires a response even if the icid was not found */
5109 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5110 return 0;
5111 }
5112
5113 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5114 if (result == L2CAP_MC_CONFIRMED) {
5115 chan->local_amp_id = chan->move_id;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005116 if (chan->local_amp_id == AMP_ID_BREDR)
Mat Martineau5f3847a2012-10-23 15:24:12 -07005117 __release_logical_link(chan);
5118 } else {
5119 chan->move_id = chan->local_amp_id;
5120 }
5121
5122 l2cap_move_done(chan);
5123 }
5124
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005125 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5126
Mat Martineau5f3847a2012-10-23 15:24:12 -07005127 l2cap_chan_unlock(chan);
5128
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005129 return 0;
5130}
5131
5132static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005133 struct l2cap_cmd_hdr *cmd,
5134 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005135{
5136 struct l2cap_move_chan_cfm_rsp *rsp = data;
Mat Martineau3fd71a02012-10-23 15:24:16 -07005137 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005138 u16 icid;
5139
5140 if (cmd_len != sizeof(*rsp))
5141 return -EPROTO;
5142
5143 icid = le16_to_cpu(rsp->icid);
5144
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005145 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005146
Mat Martineau3fd71a02012-10-23 15:24:16 -07005147 chan = l2cap_get_chan_by_scid(conn, icid);
5148 if (!chan)
5149 return 0;
5150
5151 __clear_chan_timer(chan);
5152
5153 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5154 chan->local_amp_id = chan->move_id;
5155
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005156 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
Mat Martineau3fd71a02012-10-23 15:24:16 -07005157 __release_logical_link(chan);
5158
5159 l2cap_move_done(chan);
5160 }
5161
5162 l2cap_chan_unlock(chan);
5163
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005164 return 0;
5165}
5166
Claudio Takahaside731152011-02-11 19:28:55 -02005167static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005168 struct l2cap_cmd_hdr *cmd,
Johan Hedberg203e6392013-05-15 10:07:15 +03005169 u16 cmd_len, u8 *data)
Claudio Takahaside731152011-02-11 19:28:55 -02005170{
5171 struct hci_conn *hcon = conn->hcon;
5172 struct l2cap_conn_param_update_req *req;
5173 struct l2cap_conn_param_update_rsp rsp;
Johan Hedberg203e6392013-05-15 10:07:15 +03005174 u16 min, max, latency, to_multiplier;
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005175 int err;
Claudio Takahaside731152011-02-11 19:28:55 -02005176
Johan Hedberg40bef302014-07-16 11:42:27 +03005177 if (hcon->role != HCI_ROLE_MASTER)
Claudio Takahaside731152011-02-11 19:28:55 -02005178 return -EINVAL;
5179
Claudio Takahaside731152011-02-11 19:28:55 -02005180 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5181 return -EPROTO;
5182
5183 req = (struct l2cap_conn_param_update_req *) data;
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005184 min = __le16_to_cpu(req->min);
5185 max = __le16_to_cpu(req->max);
Claudio Takahaside731152011-02-11 19:28:55 -02005186 latency = __le16_to_cpu(req->latency);
5187 to_multiplier = __le16_to_cpu(req->to_multiplier);
5188
5189 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01005190 min, max, latency, to_multiplier);
Claudio Takahaside731152011-02-11 19:28:55 -02005191
5192 memset(&rsp, 0, sizeof(rsp));
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005193
Andre Guedesd4905f22014-06-25 21:52:52 -03005194 err = hci_check_conn_params(min, max, latency, to_multiplier);
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005195 if (err)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005196 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005197 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005198 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005199
5200 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005201 sizeof(rsp), &rsp);
Claudio Takahaside731152011-02-11 19:28:55 -02005202
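	/* The parameters were acceptable: update the LE link and let the
	 * management interface know whether the new values are worth
	 * storing for later connections.
	 */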
Andre Guedesffb5a8272014-07-01 18:10:11 -03005203 if (!err) {
Johan Hedbergf4869e22014-07-02 17:37:32 +03005204 u8 store_hint;
Andre Guedesffb5a8272014-07-01 18:10:11 -03005205
Johan Hedbergf4869e22014-07-02 17:37:32 +03005206 store_hint = hci_le_conn_update(hcon, min, max, latency,
5207 to_multiplier);
5208 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5209 store_hint, min, max, latency,
5210 to_multiplier);
5211
Andre Guedesffb5a8272014-07-01 18:10:11 -03005212 }
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005213
Claudio Takahaside731152011-02-11 19:28:55 -02005214 return 0;
5215}
5216
Johan Hedbergf1496de2013-05-13 14:15:56 +03005217static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5218 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5219 u8 *data)
5220{
5221 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
Johan Hedberg3e64b7b2014-11-13 10:55:19 +02005222 struct hci_conn *hcon = conn->hcon;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005223 u16 dcid, mtu, mps, credits, result;
5224 struct l2cap_chan *chan;
Johan Hedberg3e64b7b2014-11-13 10:55:19 +02005225 int err, sec_level;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005226
5227 if (cmd_len < sizeof(*rsp))
5228 return -EPROTO;
5229
5230 dcid = __le16_to_cpu(rsp->dcid);
5231 mtu = __le16_to_cpu(rsp->mtu);
5232 mps = __le16_to_cpu(rsp->mps);
5233 credits = __le16_to_cpu(rsp->credits);
5234 result = __le16_to_cpu(rsp->result);
5235
5236 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5237 return -EPROTO;
5238
5239 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5240 dcid, mtu, mps, credits, result);
5241
5242 mutex_lock(&conn->chan_lock);
5243
5244 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5245 if (!chan) {
5246 err = -EBADSLT;
5247 goto unlock;
5248 }
5249
5250 err = 0;
5251
5252 l2cap_chan_lock(chan);
5253
5254 switch (result) {
5255 case L2CAP_CR_SUCCESS:
5256 chan->ident = 0;
5257 chan->dcid = dcid;
5258 chan->omtu = mtu;
5259 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005260 chan->tx_credits = credits;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005261 l2cap_chan_ready(chan);
5262 break;
5263
Johan Hedberg3e64b7b2014-11-13 10:55:19 +02005264 case L2CAP_CR_AUTHENTICATION:
5265 case L2CAP_CR_ENCRYPTION:
5266 /* If we already have MITM protection we can't do
5267 * anything.
5268 */
5269 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5270 l2cap_chan_del(chan, ECONNREFUSED);
5271 break;
5272 }
5273
5274 sec_level = hcon->sec_level + 1;
5275 if (chan->sec_level < sec_level)
5276 chan->sec_level = sec_level;
5277
5278 /* We'll need to send a new Connect Request */
5279 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5280
5281 smp_conn_security(hcon, chan->sec_level);
5282 break;
5283
Johan Hedbergf1496de2013-05-13 14:15:56 +03005284 default:
5285 l2cap_chan_del(chan, ECONNREFUSED);
5286 break;
5287 }
5288
5289 l2cap_chan_unlock(chan);
5290
5291unlock:
5292 mutex_unlock(&conn->chan_lock);
5293
5294 return err;
5295}
5296
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005297static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005298 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5299 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005300{
5301 int err = 0;
5302
5303 switch (cmd->code) {
5304 case L2CAP_COMMAND_REJ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005305 l2cap_command_rej(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005306 break;
5307
5308 case L2CAP_CONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005309 err = l2cap_connect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005310 break;
5311
5312 case L2CAP_CONN_RSP:
Mat Martineauf5a25982012-10-11 17:48:21 +03005313 case L2CAP_CREATE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005314 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005315 break;
5316
5317 case L2CAP_CONF_REQ:
5318 err = l2cap_config_req(conn, cmd, cmd_len, data);
5319 break;
5320
5321 case L2CAP_CONF_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005322 l2cap_config_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005323 break;
5324
5325 case L2CAP_DISCONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005326 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005327 break;
5328
5329 case L2CAP_DISCONN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005330 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005331 break;
5332
5333 case L2CAP_ECHO_REQ:
5334 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5335 break;
5336
5337 case L2CAP_ECHO_RSP:
5338 break;
5339
5340 case L2CAP_INFO_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005341 err = l2cap_information_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005342 break;
5343
5344 case L2CAP_INFO_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005345 l2cap_information_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005346 break;
5347
Mat Martineauf94ff6f2011-11-02 16:18:32 -07005348 case L2CAP_CREATE_CHAN_REQ:
5349 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5350 break;
5351
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005352 case L2CAP_MOVE_CHAN_REQ:
5353 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5354 break;
5355
5356 case L2CAP_MOVE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005357 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005358 break;
5359
5360 case L2CAP_MOVE_CHAN_CFM:
5361 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5362 break;
5363
5364 case L2CAP_MOVE_CHAN_CFM_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005365 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005366 break;
5367
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005368 default:
5369 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5370 err = -EINVAL;
5371 break;
5372 }
5373
5374 return err;
5375}
5376
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005377static int l2cap_le_connect_req(struct l2cap_conn *conn,
5378 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5379 u8 *data)
5380{
5381 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5382 struct l2cap_le_conn_rsp rsp;
5383 struct l2cap_chan *chan, *pchan;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005384 u16 dcid, scid, credits, mtu, mps;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005385 __le16 psm;
5386 u8 result;
5387
5388 if (cmd_len != sizeof(*req))
5389 return -EPROTO;
5390
5391 scid = __le16_to_cpu(req->scid);
5392 mtu = __le16_to_cpu(req->mtu);
5393 mps = __le16_to_cpu(req->mps);
5394 psm = req->psm;
5395 dcid = 0;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005396 credits = 0;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005397
5398 if (mtu < 23 || mps < 23)
5399 return -EPROTO;
5400
5401 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5402 scid, mtu, mps);
5403
5404 /* Check if we have socket listening on psm */
5405 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5406 &conn->hcon->dst, LE_LINK);
5407 if (!pchan) {
5408 result = L2CAP_CR_BAD_PSM;
5409 chan = NULL;
5410 goto response;
5411 }
5412
5413 mutex_lock(&conn->chan_lock);
5414 l2cap_chan_lock(pchan);
5415
Johan Hedberg35dc6f82014-11-13 10:55:18 +02005416 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5417 SMP_ALLOW_STK)) {
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005418 result = L2CAP_CR_AUTHENTICATION;
5419 chan = NULL;
5420 goto response_unlock;
5421 }
5422
5423 /* Check if we already have channel with that dcid */
5424 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5425 result = L2CAP_CR_NO_MEM;
5426 chan = NULL;
5427 goto response_unlock;
5428 }
5429
5430 chan = pchan->ops->new_connection(pchan);
5431 if (!chan) {
5432 result = L2CAP_CR_NO_MEM;
5433 goto response_unlock;
5434 }
5435
Johan Hedberg0ce43ce2013-12-05 14:55:33 +02005436 l2cap_le_flowctl_init(chan);
5437
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005438 bacpy(&chan->src, &conn->hcon->src);
5439 bacpy(&chan->dst, &conn->hcon->dst);
5440 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5441 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5442 chan->psm = psm;
5443 chan->dcid = scid;
5444 chan->omtu = mtu;
5445 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005446 chan->tx_credits = __le16_to_cpu(req->credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005447
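	/* Register the channel on this connection; the response carries
	 * our allocated source CID and the initial credits the remote
	 * may consume when sending to us.
	 */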
5448 __l2cap_chan_add(conn, chan);
5449 dcid = chan->scid;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005450 credits = chan->rx_credits;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005451
5452 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5453
5454 chan->ident = cmd->ident;
5455
5456 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5457 l2cap_state_change(chan, BT_CONNECT2);
Johan Hedberg434714d2014-09-01 09:45:03 +03005458 /* The following result value is actually not defined
5459 * for LE CoC but we use it to let the function know
5460 * that it should bail out after doing its cleanup
5461 * instead of sending a response.
5462 */
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005463 result = L2CAP_CR_PEND;
5464 chan->ops->defer(chan);
5465 } else {
5466 l2cap_chan_ready(chan);
5467 result = L2CAP_CR_SUCCESS;
5468 }
5469
5470response_unlock:
5471 l2cap_chan_unlock(pchan);
5472 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03005473 l2cap_chan_put(pchan);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005474
5475 if (result == L2CAP_CR_PEND)
5476 return 0;
5477
5478response:
5479 if (chan) {
5480 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02005481 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005482 } else {
5483 rsp.mtu = 0;
5484 rsp.mps = 0;
5485 }
5486
5487 rsp.dcid = cpu_to_le16(dcid);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005488 rsp.credits = cpu_to_le16(credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005489 rsp.result = cpu_to_le16(result);
5490
5491 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5492
5493 return 0;
5494}
5495
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005496static inline int l2cap_le_credits(struct l2cap_conn *conn,
5497 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5498 u8 *data)
5499{
5500 struct l2cap_le_credits *pkt;
5501 struct l2cap_chan *chan;
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005502 u16 cid, credits, max_credits;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005503
5504 if (cmd_len != sizeof(*pkt))
5505 return -EPROTO;
5506
5507 pkt = (struct l2cap_le_credits *) data;
5508 cid = __le16_to_cpu(pkt->cid);
5509 credits = __le16_to_cpu(pkt->credits);
5510
5511 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5512
5513 chan = l2cap_get_chan_by_dcid(conn, cid);
5514 if (!chan)
5515 return -EBADSLT;
5516
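	/* Credits are cumulative and the outstanding total may never
	 * exceed LE_FLOWCTL_MAX_CREDITS, so a grant larger than the
	 * remaining headroom is treated as a protocol violation.
	 */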
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005517 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5518 if (credits > max_credits) {
5519 BT_ERR("LE credits overflow");
5520 l2cap_send_disconn_req(chan, ECONNRESET);
Martin Townsendee930532014-10-13 19:24:45 +01005521 l2cap_chan_unlock(chan);
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005522
5523 /* Return 0 so that we don't trigger an unnecessary
5524 * command reject packet.
5525 */
5526 return 0;
5527 }
5528
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005529 chan->tx_credits += credits;
5530
5531 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5532 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5533 chan->tx_credits--;
5534 }
5535
5536 if (chan->tx_credits)
5537 chan->ops->resume(chan);
5538
5539 l2cap_chan_unlock(chan);
5540
5541 return 0;
5542}
5543
Johan Hedberg71fb4192013-12-10 10:52:48 +02005544static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5545 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5546 u8 *data)
5547{
5548 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5549 struct l2cap_chan *chan;
5550
5551 if (cmd_len < sizeof(*rej))
5552 return -EPROTO;
5553
5554 mutex_lock(&conn->chan_lock);
5555
5556 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5557 if (!chan)
5558 goto done;
5559
5560 l2cap_chan_lock(chan);
5561 l2cap_chan_del(chan, ECONNREFUSED);
5562 l2cap_chan_unlock(chan);
5563
5564done:
5565 mutex_unlock(&conn->chan_lock);
5566 return 0;
5567}
5568
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005569static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
Johan Hedberg203e6392013-05-15 10:07:15 +03005570 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5571 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005572{
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005573 int err = 0;
5574
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005575 switch (cmd->code) {
5576 case L2CAP_COMMAND_REJ:
Johan Hedberg71fb4192013-12-10 10:52:48 +02005577 l2cap_le_command_rej(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005578 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005579
5580 case L2CAP_CONN_PARAM_UPDATE_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005581 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5582 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005583
5584 case L2CAP_CONN_PARAM_UPDATE_RSP:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005585 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005586
Johan Hedbergf1496de2013-05-13 14:15:56 +03005587 case L2CAP_LE_CONN_RSP:
5588 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005589 break;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005590
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005591 case L2CAP_LE_CONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005592 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5593 break;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005594
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005595 case L2CAP_LE_CREDITS:
5596 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5597 break;
5598
Johan Hedberg3defe012013-05-15 10:16:06 +03005599 case L2CAP_DISCONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005600 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5601 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005602
5603 case L2CAP_DISCONN_RSP:
5604 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005605 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005606
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005607 default:
5608 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005609 err = -EINVAL;
5610 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005611 }
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005612
5613 return err;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005614}
5615
Johan Hedbergc5623552013-04-29 19:35:33 +03005616static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5617 struct sk_buff *skb)
5618{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005619 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005620 struct l2cap_cmd_hdr *cmd;
5621 u16 len;
Johan Hedbergc5623552013-04-29 19:35:33 +03005622 int err;
5623
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005624 if (hcon->type != LE_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005625 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005626
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005627 if (skb->len < L2CAP_CMD_HDR_SIZE)
5628 goto drop;
Johan Hedbergc5623552013-04-29 19:35:33 +03005629
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005630 cmd = (void *) skb->data;
5631 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
Johan Hedbergc5623552013-04-29 19:35:33 +03005632
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005633 len = le16_to_cpu(cmd->len);
Johan Hedbergc5623552013-04-29 19:35:33 +03005634
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005635 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
Johan Hedbergc5623552013-04-29 19:35:33 +03005636
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005637 if (len != skb->len || !cmd->ident) {
5638 BT_DBG("corrupted command");
5639 goto drop;
5640 }
Johan Hedbergc5623552013-04-29 19:35:33 +03005641
Johan Hedberg203e6392013-05-15 10:07:15 +03005642 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005643 if (err) {
5644 struct l2cap_cmd_rej_unk rej;
Johan Hedbergc5623552013-04-29 19:35:33 +03005645
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005646		BT_ERR("Error handling LE signaling command (%d)", err);
Johan Hedbergc5623552013-04-29 19:35:33 +03005647
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005648 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005649 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5650 sizeof(rej), &rej);
Johan Hedbergc5623552013-04-29 19:35:33 +03005651 }
5652
Marcel Holtmann3b166292013-10-02 08:28:21 -07005653drop:
Johan Hedbergc5623552013-04-29 19:35:33 +03005654 kfree_skb(skb);
5655}
5656
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005657static inline void l2cap_sig_channel(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005658 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005659{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005660 struct hci_conn *hcon = conn->hcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005661 u8 *data = skb->data;
5662 int len = skb->len;
5663 struct l2cap_cmd_hdr cmd;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005664 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005665
5666 l2cap_raw_recv(conn, skb);
5667
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005668 if (hcon->type != ACL_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005669 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005670
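	/* Unlike the LE signaling channel, a BR/EDR C-frame may carry
	 * several commands back to back, so walk the buffer one command
	 * at a time.
	 */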
Linus Torvalds1da177e2005-04-16 15:20:36 -07005671 while (len >= L2CAP_CMD_HDR_SIZE) {
Al Viro88219a02007-07-29 00:17:25 -07005672 u16 cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005673 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5674 data += L2CAP_CMD_HDR_SIZE;
5675 len -= L2CAP_CMD_HDR_SIZE;
5676
Al Viro88219a02007-07-29 00:17:25 -07005677 cmd_len = le16_to_cpu(cmd.len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005678
Gustavo Padovan2d792812012-10-06 10:07:01 +01005679 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5680 cmd.ident);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005681
Al Viro88219a02007-07-29 00:17:25 -07005682 if (cmd_len > len || !cmd.ident) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005683 BT_DBG("corrupted command");
5684 break;
5685 }
5686
Johan Hedbergc5623552013-04-29 19:35:33 +03005687 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005688 if (err) {
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03005689 struct l2cap_cmd_rej_unk rej;
Gustavo F. Padovan2c6d1a22011-03-23 14:38:32 -03005690
5691			BT_ERR("Error handling BR/EDR signaling command (%d)", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005692
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005693 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Gustavo Padovan2d792812012-10-06 10:07:01 +01005694 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5695 sizeof(rej), &rej);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005696 }
5697
Al Viro88219a02007-07-29 00:17:25 -07005698 data += cmd_len;
5699 len -= cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005700 }
5701
Marcel Holtmann3b166292013-10-02 08:28:21 -07005702drop:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005703 kfree_skb(skb);
5704}
5705
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005706static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005707{
5708 u16 our_fcs, rcv_fcs;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03005709 int hdr_size;
5710
5711 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5712 hdr_size = L2CAP_EXT_HDR_SIZE;
5713 else
5714 hdr_size = L2CAP_ENH_HDR_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005715
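	/* When the 16-bit FCS is in use it covers the header and payload;
	 * the received FCS is the last two octets and is stripped before
	 * being compared against the locally computed CRC.
	 */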
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005716 if (chan->fcs == L2CAP_FCS_CRC16) {
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005717 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005718 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5719 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5720
5721 if (our_fcs != rcv_fcs)
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005722 return -EBADMSG;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005723 }
5724 return 0;
5725}
5726
Mat Martineau6ea00482012-05-17 20:53:52 -07005727static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005728{
Mat Martineaue31f7632012-05-17 20:53:41 -07005729 struct l2cap_ctrl control;
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005730
Mat Martineaue31f7632012-05-17 20:53:41 -07005731 BT_DBG("chan %p", chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005732
Mat Martineaue31f7632012-05-17 20:53:41 -07005733 memset(&control, 0, sizeof(control));
5734 control.sframe = 1;
5735 control.final = 1;
5736 control.reqseq = chan->buffer_seq;
5737 set_bit(CONN_SEND_FBIT, &chan->conn_state);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005738
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005739 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
Mat Martineaue31f7632012-05-17 20:53:41 -07005740 control.super = L2CAP_SUPER_RNR;
5741 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005742 }
5743
Mat Martineaue31f7632012-05-17 20:53:41 -07005744 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5745 chan->unacked_frames > 0)
5746 __set_retrans_timer(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005747
Mat Martineaue31f7632012-05-17 20:53:41 -07005748 /* Send pending iframes */
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03005749 l2cap_ertm_send(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005750
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005751 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
Mat Martineaue31f7632012-05-17 20:53:41 -07005752 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5753 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5754 * send it now.
5755 */
5756 control.super = L2CAP_SUPER_RR;
5757 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005758 }
5759}
5760
Gustavo Padovan2d792812012-10-06 10:07:01 +01005761static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5762 struct sk_buff **last_frag)
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005763{
Mat Martineau84084a32011-07-22 14:54:00 -07005764 /* skb->len reflects data in skb as well as all fragments
5765 * skb->data_len reflects only data in fragments
5766 */
5767 if (!skb_has_frag_list(skb))
5768 skb_shinfo(skb)->frag_list = new_frag;
5769
5770 new_frag->next = NULL;
5771
5772 (*last_frag)->next = new_frag;
5773 *last_frag = new_frag;
5774
5775 skb->len += new_frag->len;
5776 skb->data_len += new_frag->len;
5777 skb->truesize += new_frag->truesize;
5778}
5779
Mat Martineau4b51dae92012-05-17 20:53:37 -07005780static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5781 struct l2cap_ctrl *control)
Mat Martineau84084a32011-07-22 14:54:00 -07005782{
5783 int err = -EINVAL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005784
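	/* Segmentation and reassembly: an unsegmented SDU is delivered
	 * as-is, a start fragment carries the total SDU length, and
	 * continuation/end fragments are chained onto chan->sdu until
	 * the full length has been received.
	 */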
Mat Martineau4b51dae92012-05-17 20:53:37 -07005785 switch (control->sar) {
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005786 case L2CAP_SAR_UNSEGMENTED:
Mat Martineau84084a32011-07-22 14:54:00 -07005787 if (chan->sdu)
5788 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005789
Gustavo Padovan80b98022012-05-27 22:27:51 -03005790 err = chan->ops->recv(chan, skb);
Mat Martineau84084a32011-07-22 14:54:00 -07005791 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005792
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005793 case L2CAP_SAR_START:
Mat Martineau84084a32011-07-22 14:54:00 -07005794 if (chan->sdu)
5795 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005796
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005797 chan->sdu_len = get_unaligned_le16(skb->data);
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005798 skb_pull(skb, L2CAP_SDULEN_SIZE);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005799
Mat Martineau84084a32011-07-22 14:54:00 -07005800 if (chan->sdu_len > chan->imtu) {
5801 err = -EMSGSIZE;
5802 break;
5803 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005804
Mat Martineau84084a32011-07-22 14:54:00 -07005805 if (skb->len >= chan->sdu_len)
5806 break;
5807
5808 chan->sdu = skb;
5809 chan->sdu_last_frag = skb;
5810
5811 skb = NULL;
5812 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005813 break;
5814
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005815 case L2CAP_SAR_CONTINUE:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005816 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005817 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005818
Mat Martineau84084a32011-07-22 14:54:00 -07005819 append_skb_frag(chan->sdu, skb,
5820 &chan->sdu_last_frag);
5821 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005822
Mat Martineau84084a32011-07-22 14:54:00 -07005823 if (chan->sdu->len >= chan->sdu_len)
5824 break;
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005825
Mat Martineau84084a32011-07-22 14:54:00 -07005826 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005827 break;
5828
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005829 case L2CAP_SAR_END:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005830 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005831 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005832
Mat Martineau84084a32011-07-22 14:54:00 -07005833 append_skb_frag(chan->sdu, skb,
5834 &chan->sdu_last_frag);
5835 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005836
Mat Martineau84084a32011-07-22 14:54:00 -07005837 if (chan->sdu->len != chan->sdu_len)
5838 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005839
Gustavo Padovan80b98022012-05-27 22:27:51 -03005840 err = chan->ops->recv(chan, chan->sdu);
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005841
Mat Martineau84084a32011-07-22 14:54:00 -07005842 if (!err) {
5843 /* Reassembly complete */
5844 chan->sdu = NULL;
5845 chan->sdu_last_frag = NULL;
5846 chan->sdu_len = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005847 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005848 break;
5849 }
5850
Mat Martineau84084a32011-07-22 14:54:00 -07005851 if (err) {
5852 kfree_skb(skb);
5853 kfree_skb(chan->sdu);
5854 chan->sdu = NULL;
5855 chan->sdu_last_frag = NULL;
5856 chan->sdu_len = 0;
5857 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005858
Mat Martineau84084a32011-07-22 14:54:00 -07005859 return err;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005860}
5861
Mat Martineau32b32732012-10-23 15:24:11 -07005862static int l2cap_resegment(struct l2cap_chan *chan)
5863{
5864 /* Placeholder */
5865 return 0;
5866}
5867
Mat Martineaue3281402011-07-07 09:39:02 -07005868void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
Gustavo F. Padovan712132eb2010-06-21 19:39:50 -03005869{
Mat Martineau61aa4f52012-05-17 20:53:40 -07005870 u8 event;
5871
5872 if (chan->mode != L2CAP_MODE_ERTM)
5873 return;
5874
5875 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03005876 l2cap_tx(chan, NULL, NULL, event);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005877}
5878
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005879static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5880{
Mat Martineau63838722012-05-17 20:53:45 -07005881 int err = 0;
5882 /* Pass sequential frames to l2cap_reassemble_sdu()
5883 * until a gap is encountered.
5884 */
5885
5886 BT_DBG("chan %p", chan);
5887
5888 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5889 struct sk_buff *skb;
5890 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5891 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5892
5893 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5894
5895 if (!skb)
5896 break;
5897
5898 skb_unlink(skb, &chan->srej_q);
5899 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5900 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5901 if (err)
5902 break;
5903 }
5904
5905 if (skb_queue_empty(&chan->srej_q)) {
5906 chan->rx_state = L2CAP_RX_STATE_RECV;
5907 l2cap_send_ack(chan);
5908 }
5909
5910 return err;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005911}
5912
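/* An SREJ selectively requests retransmission of the single I-frame
 * identified by reqseq.
 */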
5913static void l2cap_handle_srej(struct l2cap_chan *chan,
5914 struct l2cap_ctrl *control)
5915{
Mat Martineauf80842a2012-05-17 20:53:46 -07005916 struct sk_buff *skb;
5917
5918 BT_DBG("chan %p, control %p", chan, control);
5919
5920 if (control->reqseq == chan->next_tx_seq) {
5921 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005922 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07005923 return;
5924 }
5925
5926 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5927
5928 if (skb == NULL) {
5929 BT_DBG("Seq %d not available for retransmission",
5930 control->reqseq);
5931 return;
5932 }
5933
5934 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5935 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005936 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07005937 return;
5938 }
5939
5940 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5941
5942 if (control->poll) {
5943 l2cap_pass_to_tx(chan, control);
5944
5945 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5946 l2cap_retransmit(chan, control);
5947 l2cap_ertm_send(chan);
5948
5949 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5950 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5951 chan->srej_save_reqseq = control->reqseq;
5952 }
5953 } else {
5954 l2cap_pass_to_tx_fbit(chan, control);
5955
5956 if (control->final) {
5957 if (chan->srej_save_reqseq != control->reqseq ||
5958 !test_and_clear_bit(CONN_SREJ_ACT,
5959 &chan->conn_state))
5960 l2cap_retransmit(chan, control);
5961 } else {
5962 l2cap_retransmit(chan, control);
5963 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5964 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5965 chan->srej_save_reqseq = control->reqseq;
5966 }
5967 }
5968 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005969}
5970
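/* A REJ reports a go-back-N style gap: all unacked I-frames starting
 * at reqseq are retransmitted.
 */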
5971static void l2cap_handle_rej(struct l2cap_chan *chan,
5972 struct l2cap_ctrl *control)
5973{
Mat Martineaufcd289d2012-05-17 20:53:47 -07005974 struct sk_buff *skb;
5975
5976 BT_DBG("chan %p, control %p", chan, control);
5977
5978 if (control->reqseq == chan->next_tx_seq) {
5979 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005980 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07005981 return;
5982 }
5983
5984 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5985
5986 if (chan->max_tx && skb &&
5987 bt_cb(skb)->control.retries >= chan->max_tx) {
5988 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005989 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07005990 return;
5991 }
5992
5993 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5994
5995 l2cap_pass_to_tx(chan, control);
5996
5997 if (control->final) {
5998 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5999 l2cap_retransmit_all(chan, control);
6000 } else {
6001 l2cap_retransmit_all(chan, control);
6002 l2cap_ertm_send(chan);
6003 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6004 set_bit(CONN_REJ_ACT, &chan->conn_state);
6005 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006006}
6007
Mat Martineau4b51dae92012-05-17 20:53:37 -07006008static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6009{
6010 BT_DBG("chan %p, txseq %d", chan, txseq);
6011
6012 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6013 chan->expected_tx_seq);
6014
6015 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6016 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
Gustavo Padovan2d792812012-10-06 10:07:01 +01006017 chan->tx_win) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07006018 /* See notes below regarding "double poll" and
6019 * invalid packets.
6020 */
6021 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6022 BT_DBG("Invalid/Ignore - after SREJ");
6023 return L2CAP_TXSEQ_INVALID_IGNORE;
6024 } else {
6025 BT_DBG("Invalid - in window after SREJ sent");
6026 return L2CAP_TXSEQ_INVALID;
6027 }
6028 }
6029
6030 if (chan->srej_list.head == txseq) {
6031 BT_DBG("Expected SREJ");
6032 return L2CAP_TXSEQ_EXPECTED_SREJ;
6033 }
6034
6035 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6036 BT_DBG("Duplicate SREJ - txseq already stored");
6037 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6038 }
6039
6040 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6041 BT_DBG("Unexpected SREJ - not requested");
6042 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6043 }
6044 }
6045
6046 if (chan->expected_tx_seq == txseq) {
6047 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6048 chan->tx_win) {
6049 BT_DBG("Invalid - txseq outside tx window");
6050 return L2CAP_TXSEQ_INVALID;
6051 } else {
6052 BT_DBG("Expected");
6053 return L2CAP_TXSEQ_EXPECTED;
6054 }
6055 }
6056
6057 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
Gustavo Padovan2d792812012-10-06 10:07:01 +01006058 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07006059 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6060 return L2CAP_TXSEQ_DUPLICATE;
6061 }
6062
6063 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6064 /* A source of invalid packets is a "double poll" condition,
6065 * where delays cause us to send multiple poll packets. If
6066 * the remote stack receives and processes both polls,
6067 * sequence numbers can wrap around in such a way that a
6068 * resent frame has a sequence number that looks like new data
6069 * with a sequence gap. This would trigger an erroneous SREJ
6070 * request.
6071 *
6072 * Fortunately, this is impossible with a tx window that's
6073 * less than half of the maximum sequence number, which allows
6074 * invalid frames to be safely ignored.
6075 *
6076 * With tx window sizes greater than half of the tx window
6077 * maximum, the frame is invalid and cannot be ignored. This
6078 * causes a disconnect.
6079 */
6080
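		/* For example, with the 6-bit sequence numbers of the
		 * enhanced control field tx_win_max is 63, so a tx window
		 * of at most 32 lets such stale frames be ignored safely,
		 * while a larger window forces a disconnect below.
		 */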
6081 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6082 BT_DBG("Invalid/Ignore - txseq outside tx window");
6083 return L2CAP_TXSEQ_INVALID_IGNORE;
6084 } else {
6085 BT_DBG("Invalid - txseq outside tx window");
6086 return L2CAP_TXSEQ_INVALID;
6087 }
6088 } else {
6089 BT_DBG("Unexpected - txseq indicates missing frames");
6090 return L2CAP_TXSEQ_UNEXPECTED;
6091 }
6092}
6093
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006094static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6095 struct l2cap_ctrl *control,
6096 struct sk_buff *skb, u8 event)
6097{
6098 int err = 0;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006099 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006100
6101 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6102 event);
6103
6104 switch (event) {
6105 case L2CAP_EV_RECV_IFRAME:
6106 switch (l2cap_classify_txseq(chan, control->txseq)) {
6107 case L2CAP_TXSEQ_EXPECTED:
6108 l2cap_pass_to_tx(chan, control);
6109
6110 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6111 BT_DBG("Busy, discarding expected seq %d",
6112 control->txseq);
6113 break;
6114 }
6115
6116 chan->expected_tx_seq = __next_seq(chan,
6117 control->txseq);
6118
6119 chan->buffer_seq = chan->expected_tx_seq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006120 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006121
6122 err = l2cap_reassemble_sdu(chan, skb, control);
6123 if (err)
6124 break;
6125
6126 if (control->final) {
6127 if (!test_and_clear_bit(CONN_REJ_ACT,
6128 &chan->conn_state)) {
6129 control->final = 0;
6130 l2cap_retransmit_all(chan, control);
6131 l2cap_ertm_send(chan);
6132 }
6133 }
6134
6135 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6136 l2cap_send_ack(chan);
6137 break;
6138 case L2CAP_TXSEQ_UNEXPECTED:
6139 l2cap_pass_to_tx(chan, control);
6140
6141 /* Can't issue SREJ frames in the local busy state.
6142			 * Drop this frame; it will be seen as missing
6143 * when local busy is exited.
6144 */
6145 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6146 BT_DBG("Busy, discarding unexpected seq %d",
6147 control->txseq);
6148 break;
6149 }
6150
6151 /* There was a gap in the sequence, so an SREJ
6152 * must be sent for each missing frame. The
6153 * current frame is stored for later use.
6154 */
6155 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006156 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006157 BT_DBG("Queued %p (queue len %d)", skb,
6158 skb_queue_len(&chan->srej_q));
6159
6160 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6161 l2cap_seq_list_clear(&chan->srej_list);
6162 l2cap_send_srej(chan, control->txseq);
6163
6164 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6165 break;
6166 case L2CAP_TXSEQ_DUPLICATE:
6167 l2cap_pass_to_tx(chan, control);
6168 break;
6169 case L2CAP_TXSEQ_INVALID_IGNORE:
6170 break;
6171 case L2CAP_TXSEQ_INVALID:
6172 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006173 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006174 break;
6175 }
6176 break;
6177 case L2CAP_EV_RECV_RR:
6178 l2cap_pass_to_tx(chan, control);
6179 if (control->final) {
6180 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6181
Mat Martineaue6a3ee62012-10-23 15:24:22 -07006182 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6183 !__chan_is_moving(chan)) {
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006184 control->final = 0;
6185 l2cap_retransmit_all(chan, control);
6186 }
6187
6188 l2cap_ertm_send(chan);
6189 } else if (control->poll) {
6190 l2cap_send_i_or_rr_or_rnr(chan);
6191 } else {
6192 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6193 &chan->conn_state) &&
6194 chan->unacked_frames)
6195 __set_retrans_timer(chan);
6196
6197 l2cap_ertm_send(chan);
6198 }
6199 break;
6200 case L2CAP_EV_RECV_RNR:
6201 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6202 l2cap_pass_to_tx(chan, control);
6203 if (control && control->poll) {
6204 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6205 l2cap_send_rr_or_rnr(chan, 0);
6206 }
6207 __clear_retrans_timer(chan);
6208 l2cap_seq_list_clear(&chan->retrans_list);
6209 break;
6210 case L2CAP_EV_RECV_REJ:
6211 l2cap_handle_rej(chan, control);
6212 break;
6213 case L2CAP_EV_RECV_SREJ:
6214 l2cap_handle_srej(chan, control);
6215 break;
6216 default:
6217 break;
6218 }
6219
6220 if (skb && !skb_in_use) {
6221 BT_DBG("Freeing %p", skb);
6222 kfree_skb(skb);
6223 }
6224
6225 return err;
6226}
6227
6228static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6229 struct l2cap_ctrl *control,
6230 struct sk_buff *skb, u8 event)
6231{
6232 int err = 0;
6233 u16 txseq = control->txseq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006234 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006235
6236 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6237 event);
6238
6239 switch (event) {
6240 case L2CAP_EV_RECV_IFRAME:
6241 switch (l2cap_classify_txseq(chan, txseq)) {
6242 case L2CAP_TXSEQ_EXPECTED:
6243 /* Keep frame for reassembly later */
6244 l2cap_pass_to_tx(chan, control);
6245 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006246 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006247 BT_DBG("Queued %p (queue len %d)", skb,
6248 skb_queue_len(&chan->srej_q));
6249
6250 chan->expected_tx_seq = __next_seq(chan, txseq);
6251 break;
6252 case L2CAP_TXSEQ_EXPECTED_SREJ:
6253 l2cap_seq_list_pop(&chan->srej_list);
6254
6255 l2cap_pass_to_tx(chan, control);
6256 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006257 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006258 BT_DBG("Queued %p (queue len %d)", skb,
6259 skb_queue_len(&chan->srej_q));
6260
6261 err = l2cap_rx_queued_iframes(chan);
6262 if (err)
6263 break;
6264
6265 break;
6266 case L2CAP_TXSEQ_UNEXPECTED:
6267 /* Got a frame that can't be reassembled yet.
6268 * Save it for later, and send SREJs to cover
6269 * the missing frames.
6270 */
6271 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006272 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006273 BT_DBG("Queued %p (queue len %d)", skb,
6274 skb_queue_len(&chan->srej_q));
6275
6276 l2cap_pass_to_tx(chan, control);
6277 l2cap_send_srej(chan, control->txseq);
6278 break;
6279 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6280 /* This frame was requested with an SREJ, but
6281 * some expected retransmitted frames are
6282 * missing. Request retransmission of missing
6283 * SREJ'd frames.
6284 */
6285 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006286 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006287 BT_DBG("Queued %p (queue len %d)", skb,
6288 skb_queue_len(&chan->srej_q));
6289
6290 l2cap_pass_to_tx(chan, control);
6291 l2cap_send_srej_list(chan, control->txseq);
6292 break;
6293 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6294 /* We've already queued this frame. Drop this copy. */
6295 l2cap_pass_to_tx(chan, control);
6296 break;
6297 case L2CAP_TXSEQ_DUPLICATE:
6298 /* Expecting a later sequence number, so this frame
6299 * was already received. Ignore it completely.
6300 */
6301 break;
6302 case L2CAP_TXSEQ_INVALID_IGNORE:
6303 break;
6304 case L2CAP_TXSEQ_INVALID:
6305 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006306 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006307 break;
6308 }
6309 break;
6310 case L2CAP_EV_RECV_RR:
6311 l2cap_pass_to_tx(chan, control);
6312 if (control->final) {
6313 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6314
6315 if (!test_and_clear_bit(CONN_REJ_ACT,
6316 &chan->conn_state)) {
6317 control->final = 0;
6318 l2cap_retransmit_all(chan, control);
6319 }
6320
6321 l2cap_ertm_send(chan);
6322 } else if (control->poll) {
6323 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6324 &chan->conn_state) &&
6325 chan->unacked_frames) {
6326 __set_retrans_timer(chan);
6327 }
6328
6329 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6330 l2cap_send_srej_tail(chan);
6331 } else {
6332 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6333 &chan->conn_state) &&
6334 chan->unacked_frames)
6335 __set_retrans_timer(chan);
6336
6337 l2cap_send_ack(chan);
6338 }
6339 break;
6340 case L2CAP_EV_RECV_RNR:
6341 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6342 l2cap_pass_to_tx(chan, control);
6343 if (control->poll) {
6344 l2cap_send_srej_tail(chan);
6345 } else {
6346 struct l2cap_ctrl rr_control;
6347 memset(&rr_control, 0, sizeof(rr_control));
6348 rr_control.sframe = 1;
6349 rr_control.super = L2CAP_SUPER_RR;
6350 rr_control.reqseq = chan->buffer_seq;
6351 l2cap_send_sframe(chan, &rr_control);
6352 }
6353
6354 break;
6355 case L2CAP_EV_RECV_REJ:
6356 l2cap_handle_rej(chan, control);
6357 break;
6358 case L2CAP_EV_RECV_SREJ:
6359 l2cap_handle_srej(chan, control);
6360 break;
6361 }
6362
6363 if (skb && !skb_in_use) {
6364 BT_DBG("Freeing %p", skb);
6365 kfree_skb(skb);
6366 }
6367
6368 return err;
6369}
6370
Mat Martineau32b32732012-10-23 15:24:11 -07006371static int l2cap_finish_move(struct l2cap_chan *chan)
6372{
6373 BT_DBG("chan %p", chan);
6374
6375 chan->rx_state = L2CAP_RX_STATE_RECV;
6376
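	/* The outgoing MTU now follows the controller the channel ended
	 * up on: the block MTU for an AMP link, the ACL MTU for BR/EDR.
	 */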
6377 if (chan->hs_hcon)
6378 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6379 else
6380 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6381
6382 return l2cap_resegment(chan);
6383}
6384
6385static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6386 struct l2cap_ctrl *control,
6387 struct sk_buff *skb, u8 event)
6388{
6389 int err;
6390
6391 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6392 event);
6393
6394 if (!control->poll)
6395 return -EPROTO;
6396
6397 l2cap_process_reqseq(chan, control->reqseq);
6398
6399 if (!skb_queue_empty(&chan->tx_q))
6400 chan->tx_send_head = skb_peek(&chan->tx_q);
6401 else
6402 chan->tx_send_head = NULL;
6403
6404 /* Rewind next_tx_seq to the point expected
6405 * by the receiver.
6406 */
6407 chan->next_tx_seq = control->reqseq;
6408 chan->unacked_frames = 0;
6409
6410 err = l2cap_finish_move(chan);
6411 if (err)
6412 return err;
6413
6414 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6415 l2cap_send_i_or_rr_or_rnr(chan);
6416
6417 if (event == L2CAP_EV_RECV_IFRAME)
6418 return -EPROTO;
6419
6420 return l2cap_rx_state_recv(chan, control, NULL, event);
6421}
6422
6423static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6424 struct l2cap_ctrl *control,
6425 struct sk_buff *skb, u8 event)
6426{
6427 int err;
6428
6429 if (!control->final)
6430 return -EPROTO;
6431
6432 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6433
6434 chan->rx_state = L2CAP_RX_STATE_RECV;
6435 l2cap_process_reqseq(chan, control->reqseq);
6436
6437 if (!skb_queue_empty(&chan->tx_q))
6438 chan->tx_send_head = skb_peek(&chan->tx_q);
6439 else
6440 chan->tx_send_head = NULL;
6441
6442 /* Rewind next_tx_seq to the point expected
6443 * by the receiver.
6444 */
6445 chan->next_tx_seq = control->reqseq;
6446 chan->unacked_frames = 0;
6447
6448 if (chan->hs_hcon)
6449 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6450 else
6451 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6452
6453 err = l2cap_resegment(chan);
6454
6455 if (!err)
6456 err = l2cap_rx_state_recv(chan, control, skb, event);
6457
6458 return err;
6459}
6460
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006461static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6462{
6463 /* Make sure reqseq is for a packet that has been sent but not acked */
6464 u16 unacked;
6465
6466 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6467 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6468}
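
/* Illustrative sketch, not part of the original source: the same
 * "sent but not yet acked" window test as __valid_reqseq(), written
 * against an explicit sequence-space size instead of the channel
 * state.  'seq_space' stands in for chan->tx_win_max + 1 and exists
 * only for this example.
 */
static inline bool example_reqseq_in_window(u16 reqseq, u16 next_tx_seq,
					    u16 expected_ack_seq,
					    u16 seq_space)
{
	u16 unacked = (next_tx_seq + seq_space - expected_ack_seq) % seq_space;
	u16 offset = (next_tx_seq + seq_space - reqseq) % seq_space;

	/* e.g. seq_space 64, next_tx_seq 2, expected_ack_seq 60:
	 * unacked = 6, so only reqseq values 60..63 and 0..2 are valid.
	 */
	return offset <= unacked;
}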
6469
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006470static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6471 struct sk_buff *skb, u8 event)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006472{
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006473 int err = 0;
6474
6475 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6476 control, skb, event, chan->rx_state);
6477
6478 if (__valid_reqseq(chan, control->reqseq)) {
6479 switch (chan->rx_state) {
6480 case L2CAP_RX_STATE_RECV:
6481 err = l2cap_rx_state_recv(chan, control, skb, event);
6482 break;
6483 case L2CAP_RX_STATE_SREJ_SENT:
6484 err = l2cap_rx_state_srej_sent(chan, control, skb,
6485 event);
6486 break;
Mat Martineau32b32732012-10-23 15:24:11 -07006487 case L2CAP_RX_STATE_WAIT_P:
6488 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6489 break;
6490 case L2CAP_RX_STATE_WAIT_F:
6491 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6492 break;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006493 default:
6494 /* shut it down */
6495 break;
6496 }
6497 } else {
6498		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6499 control->reqseq, chan->next_tx_seq,
6500 chan->expected_ack_seq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006501 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006502 }
6503
6504 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006505}
6506
6507static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6508 struct sk_buff *skb)
6509{
Mat Martineau4b51dae92012-05-17 20:53:37 -07006510 int err = 0;
6511
6512 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6513 chan->rx_state);
6514
6515 if (l2cap_classify_txseq(chan, control->txseq) ==
6516 L2CAP_TXSEQ_EXPECTED) {
6517 l2cap_pass_to_tx(chan, control);
6518
6519 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6520 __next_seq(chan, chan->buffer_seq));
6521
6522 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6523
6524 l2cap_reassemble_sdu(chan, skb, control);
6525 } else {
6526 if (chan->sdu) {
6527 kfree_skb(chan->sdu);
6528 chan->sdu = NULL;
6529 }
6530 chan->sdu_last_frag = NULL;
6531 chan->sdu_len = 0;
6532
6533 if (skb) {
6534 BT_DBG("Freeing %p", skb);
6535 kfree_skb(skb);
6536 }
6537 }
6538
6539 chan->last_acked_seq = control->txseq;
6540 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6541
6542 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006543}
6544
6545static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6546{
6547 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6548 u16 len;
6549 u8 event;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006550
Mat Martineaub76bbd62012-04-11 10:48:43 -07006551 __unpack_control(chan, skb);
6552
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006553 len = skb->len;
6554
6555 /*
6556 * We can just drop the corrupted I-frame here.
6557	 * The receiver will miss it and start proper recovery
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006558 * procedures and ask for retransmission.
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006559 */
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006560 if (l2cap_check_fcs(chan, skb))
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006561 goto drop;
6562
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006563 if (!control->sframe && control->sar == L2CAP_SAR_START)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006564 len -= L2CAP_SDULEN_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006565
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006566 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006567 len -= L2CAP_FCS_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006568
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006569 if (len > chan->mps) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006570 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006571 goto drop;
6572 }
6573
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006574 if (!control->sframe) {
6575 int err;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006576
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006577 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6578 control->sar, control->reqseq, control->final,
6579 control->txseq);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03006580
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006581 /* Validate F-bit - F=0 always valid, F=1 only
6582 * valid in TX WAIT_F
6583 */
6584 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006585 goto drop;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006586
6587 if (chan->mode != L2CAP_MODE_STREAMING) {
6588 event = L2CAP_EV_RECV_IFRAME;
6589 err = l2cap_rx(chan, control, skb, event);
6590 } else {
6591 err = l2cap_stream_rx(chan, control, skb);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006592 }
6593
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006594 if (err)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006595 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006596 } else {
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006597 const u8 rx_func_to_event[4] = {
6598 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6599 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6600 };
6601
6602 /* Only I-frames are expected in streaming mode */
6603 if (chan->mode == L2CAP_MODE_STREAMING)
6604 goto drop;
6605
6606 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6607 control->reqseq, control->final, control->poll,
6608 control->super);
6609
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006610 if (len != 0) {
Andrei Emeltchenko1bb166e2012-11-20 17:16:21 +02006611 BT_ERR("Trailing bytes: %d in sframe", len);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006612 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006613 goto drop;
6614 }
6615
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006616 /* Validate F and P bits */
6617 if (control->final && (control->poll ||
6618 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6619 goto drop;
6620
6621 event = rx_func_to_event[control->super];
6622 if (l2cap_rx(chan, control, skb, event))
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006623 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006624 }
6625
6626 return 0;
6627
6628drop:
6629 kfree_skb(skb);
6630 return 0;
6631}
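
/* Illustrative sketch (an assumption, not the helper this file uses):
 * the crc16()/trailer comparison pattern behind the FCS check above.
 * The real l2cap_check_fcs() may start the CRC earlier in the frame
 * (it covers header bytes as well); this only shows the mechanics.
 */
static inline bool example_fcs_matches(const u8 *data, u16 len)
{
	u16 computed, received;

	if (len < L2CAP_FCS_SIZE)
		return false;

	/* CRC-16 is seeded with 0 and run over everything before the
	 * trailing 2-byte FCS field, which is stored little endian.
	 */
	computed = crc16(0, data, len - L2CAP_FCS_SIZE);
	received = get_unaligned_le16(data + len - L2CAP_FCS_SIZE);

	return computed == received;
}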
6632
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006633static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6634{
6635 struct l2cap_conn *conn = chan->conn;
6636 struct l2cap_le_credits pkt;
6637 u16 return_credits;
6638
6639 /* We return more credits to the sender only after the amount of
6640 * credits falls below half of the initial amount.
6641 */
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006642 if (chan->rx_credits >= (le_max_credits + 1) / 2)
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006643 return;
6644
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006645 return_credits = le_max_credits - chan->rx_credits;
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006646
6647 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6648
6649 chan->rx_credits += return_credits;
6650
6651 pkt.cid = cpu_to_le16(chan->scid);
6652 pkt.credits = cpu_to_le16(return_credits);
6653
6654 chan->ident = l2cap_get_ident(conn);
6655
6656 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6657}
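
/* Illustrative sketch, not a helper used by this file: the credit
 * replenishment rule above with explicit parameters.  'max_credits'
 * stands in for the le_max_credits module setting.
 */
static inline u16 example_le_credits_to_return(u16 rx_credits, u16 max_credits)
{
	/* Nothing is returned while at least half of the initial credits
	 * remain; below that, the pool is topped back up to the maximum.
	 * e.g. max_credits 10: rx_credits 5..10 -> 0, rx_credits 4 -> 6.
	 */
	if (rx_credits >= (max_credits + 1) / 2)
		return 0;

	return max_credits - rx_credits;
}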
6658
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006659static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6660{
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006661 int err;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006662
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006663 if (!chan->rx_credits) {
6664 BT_ERR("No credits to receive LE L2CAP data");
Johan Hedbergdfd97742014-01-27 15:11:34 -08006665 l2cap_send_disconn_req(chan, ECONNRESET);
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006666 return -ENOBUFS;
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006667 }
6668
6669 if (chan->imtu < skb->len) {
6670 BT_ERR("Too big LE L2CAP PDU");
6671 return -ENOBUFS;
6672 }
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006673
6674 chan->rx_credits--;
6675 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6676
6677 l2cap_chan_le_send_credits(chan);
6678
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006679 err = 0;
6680
6681 if (!chan->sdu) {
6682 u16 sdu_len;
6683
6684 sdu_len = get_unaligned_le16(skb->data);
6685 skb_pull(skb, L2CAP_SDULEN_SIZE);
6686
6687 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6688 sdu_len, skb->len, chan->imtu);
6689
6690 if (sdu_len > chan->imtu) {
6691 BT_ERR("Too big LE L2CAP SDU length received");
6692 err = -EMSGSIZE;
6693 goto failed;
6694 }
6695
6696 if (skb->len > sdu_len) {
6697 BT_ERR("Too much LE L2CAP data received");
6698 err = -EINVAL;
6699 goto failed;
6700 }
6701
6702 if (skb->len == sdu_len)
6703 return chan->ops->recv(chan, skb);
6704
6705 chan->sdu = skb;
6706 chan->sdu_len = sdu_len;
6707 chan->sdu_last_frag = skb;
6708
6709 return 0;
6710 }
6711
6712 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6713 chan->sdu->len, skb->len, chan->sdu_len);
6714
6715 if (chan->sdu->len + skb->len > chan->sdu_len) {
6716 BT_ERR("Too much LE L2CAP data received");
6717 err = -EINVAL;
6718 goto failed;
6719 }
6720
6721 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6722 skb = NULL;
6723
6724 if (chan->sdu->len == chan->sdu_len) {
6725 err = chan->ops->recv(chan, chan->sdu);
6726 if (!err) {
6727 chan->sdu = NULL;
6728 chan->sdu_last_frag = NULL;
6729 chan->sdu_len = 0;
6730 }
6731 }
6732
6733failed:
6734 if (err) {
6735 kfree_skb(skb);
6736 kfree_skb(chan->sdu);
6737 chan->sdu = NULL;
6738 chan->sdu_last_frag = NULL;
6739 chan->sdu_len = 0;
6740 }
6741
6742 /* We can't return an error here since we took care of the skb
6743 * freeing internally. An error return would cause the caller to
6744 * do a double-free of the skb.
6745 */
6746 return 0;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006747}
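
/* Illustrative sketch (an assumption, not code used by the receive
 * path above): how many LE PDUs -- and therefore credits -- an SDU of
 * 'sdu_len' bytes costs the sender for a given peer MPS.  The first
 * PDU gives up L2CAP_SDULEN_SIZE bytes to the SDU length field that
 * l2cap_le_data_rcv() parses above.
 */
static inline u16 example_le_pdus_for_sdu(u16 sdu_len, u16 mps)
{
	/* Callers are assumed to pass a sane MPS (the LE minimum is 23) */
	u16 first = mps - L2CAP_SDULEN_SIZE;

	if (sdu_len <= first)
		return 1;

	/* Remaining bytes travel in full-MPS chunks, rounded up.
	 * e.g. sdu_len 100, mps 23: 1 + DIV_ROUND_UP(79, 23) = 5 PDUs.
	 */
	return 1 + DIV_ROUND_UP(sdu_len - first, mps);
}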
6748
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006749static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6750 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006751{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006752 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006753
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03006754 chan = l2cap_get_chan_by_scid(conn, cid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006755 if (!chan) {
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006756 if (cid == L2CAP_CID_A2MP) {
6757 chan = a2mp_channel_create(conn, skb);
6758 if (!chan) {
6759 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006760 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006761 }
6762
6763 l2cap_chan_lock(chan);
6764 } else {
6765 BT_DBG("unknown cid 0x%4.4x", cid);
6766 /* Drop packet and return */
6767 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006768 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006769 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006770 }
6771
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03006772 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006773
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03006774 if (chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006775 goto drop;
6776
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006777 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03006778 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006779 if (l2cap_le_data_rcv(chan, skb) < 0)
6780 goto drop;
6781
6782 goto done;
6783
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006784 case L2CAP_MODE_BASIC:
6785		/* If the socket recv buffer overflows we drop data here,
6786		 * which is *bad* because L2CAP has to be reliable.
6787		 * But we don't have any other choice: L2CAP doesn't
6788		 * provide a flow control mechanism. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006789
Szymon Janc2c96e032014-02-18 20:48:34 +01006790 if (chan->imtu < skb->len) {
6791 BT_ERR("Dropping L2CAP data: receive buffer overflow");
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006792 goto drop;
Szymon Janc2c96e032014-02-18 20:48:34 +01006793 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006794
Gustavo Padovan80b98022012-05-27 22:27:51 -03006795 if (!chan->ops->recv(chan, skb))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006796 goto done;
6797 break;
6798
6799 case L2CAP_MODE_ERTM:
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006800 case L2CAP_MODE_STREAMING:
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006801 l2cap_data_rcv(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006802 goto done;
6803
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006804 default:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006805 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006806 break;
6807 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006808
6809drop:
6810 kfree_skb(skb);
6811
6812done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02006813 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006814}
6815
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006816static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6817 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006818{
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006819 struct hci_conn *hcon = conn->hcon;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006820 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006821
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006822 if (hcon->type != ACL_LINK)
Johan Hedberga24cce12014-08-07 22:56:42 +03006823 goto free_skb;
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006824
Johan Hedbergbf20fd42013-05-14 13:23:13 +03006825 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6826 ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006827 if (!chan)
Johan Hedberga24cce12014-08-07 22:56:42 +03006828 goto free_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006829
Andrei Emeltchenko5b4ceda2012-02-24 16:35:32 +02006830 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006831
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03006832 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006833 goto drop;
6834
Vinicius Costa Gomese13e21d2011-06-17 22:46:27 -03006835 if (chan->imtu < skb->len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006836 goto drop;
6837
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006838 /* Store remote BD_ADDR and PSM for msg_name */
Marcel Holtmann06ae3312013-10-18 03:43:00 -07006839 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006840 bt_cb(skb)->psm = psm;
6841
Johan Hedberga24cce12014-08-07 22:56:42 +03006842 if (!chan->ops->recv(chan, skb)) {
6843 l2cap_chan_put(chan);
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006844 return;
Johan Hedberga24cce12014-08-07 22:56:42 +03006845 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006846
6847drop:
Johan Hedberga24cce12014-08-07 22:56:42 +03006848 l2cap_chan_put(chan);
6849free_skb:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006850 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006851}
6852
6853static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6854{
6855 struct l2cap_hdr *lh = (void *) skb->data;
Johan Hedberg61a939c2014-01-17 20:45:11 +02006856 struct hci_conn *hcon = conn->hcon;
Al Viro8e036fc2007-07-29 00:16:36 -07006857 u16 cid, len;
6858 __le16 psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006859
Johan Hedberg61a939c2014-01-17 20:45:11 +02006860 if (hcon->state != BT_CONNECTED) {
6861 BT_DBG("queueing pending rx skb");
6862 skb_queue_tail(&conn->pending_rx, skb);
6863 return;
6864 }
6865
Linus Torvalds1da177e2005-04-16 15:20:36 -07006866 skb_pull(skb, L2CAP_HDR_SIZE);
6867 cid = __le16_to_cpu(lh->cid);
6868 len = __le16_to_cpu(lh->len);
6869
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006870 if (len != skb->len) {
6871 kfree_skb(skb);
6872 return;
6873 }
6874
Johan Hedberg9e1d7e12014-07-06 11:03:36 +03006875 /* Since we can't actively block incoming LE connections we must
6876 * at least ensure that we ignore incoming data from them.
6877 */
6878 if (hcon->type == LE_LINK &&
Johan Hedbergdcc36c12014-07-09 12:59:13 +03006879 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6880 bdaddr_type(hcon, hcon->dst_type))) {
Johan Hedberge4931502014-07-02 09:36:21 +03006881 kfree_skb(skb);
6882 return;
6883 }
6884
Linus Torvalds1da177e2005-04-16 15:20:36 -07006885 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6886
6887 switch (cid) {
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03006888 case L2CAP_CID_SIGNALING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006889 l2cap_sig_channel(conn, skb);
6890 break;
6891
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03006892 case L2CAP_CID_CONN_LESS:
Andrei Emeltchenko097db762012-03-09 14:16:17 +02006893 psm = get_unaligned((__le16 *) skb->data);
Andrei Emeltchenko0181a702012-05-29 10:04:05 +03006894 skb_pull(skb, L2CAP_PSMLEN_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006895 l2cap_conless_channel(conn, psm, skb);
6896 break;
6897
Marcel Holtmanna2877622013-10-02 23:46:54 -07006898 case L2CAP_CID_LE_SIGNALING:
6899 l2cap_le_sig_channel(conn, skb);
6900 break;
6901
Linus Torvalds1da177e2005-04-16 15:20:36 -07006902 default:
6903 l2cap_data_channel(conn, cid, skb);
6904 break;
6905 }
6906}
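
/* Illustrative sketch, not used by the code above: the 4-byte basic
 * header that l2cap_recv_frame() just pulled apart, as it would be
 * written on the send side with the same struct l2cap_hdr (assuming
 * the skb was allocated with enough headroom).
 */
static inline void example_push_basic_hdr(struct sk_buff *skb, u16 cid,
					  u16 payload_len)
{
	struct l2cap_hdr *lh = (struct l2cap_hdr *) skb_push(skb, L2CAP_HDR_SIZE);

	lh->len = cpu_to_le16(payload_len);	/* excludes the header itself */
	lh->cid = cpu_to_le16(cid);		/* e.g. L2CAP_CID_SIGNALING */
}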
6907
Johan Hedberg61a939c2014-01-17 20:45:11 +02006908static void process_pending_rx(struct work_struct *work)
6909{
6910 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6911 pending_rx_work);
6912 struct sk_buff *skb;
6913
6914 BT_DBG("");
6915
6916 while ((skb = skb_dequeue(&conn->pending_rx)))
6917 l2cap_recv_frame(conn, skb);
6918}
6919
Johan Hedberg162b49e2014-01-17 20:45:10 +02006920static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6921{
6922 struct l2cap_conn *conn = hcon->l2cap_data;
6923 struct hci_chan *hchan;
6924
6925 if (conn)
6926 return conn;
6927
6928 hchan = hci_chan_create(hcon);
6929 if (!hchan)
6930 return NULL;
6931
Johan Hedberg27f70f32014-07-21 10:50:06 +03006932 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006933 if (!conn) {
6934 hci_chan_del(hchan);
6935 return NULL;
6936 }
6937
6938 kref_init(&conn->ref);
6939 hcon->l2cap_data = conn;
Johan Hedberg51bb84572014-08-15 21:06:57 +03006940 conn->hcon = hci_conn_get(hcon);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006941 conn->hchan = hchan;
6942
6943 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6944
6945 switch (hcon->type) {
6946 case LE_LINK:
6947 if (hcon->hdev->le_mtu) {
6948 conn->mtu = hcon->hdev->le_mtu;
6949 break;
6950 }
6951 /* fall through */
6952 default:
6953 conn->mtu = hcon->hdev->acl_mtu;
6954 break;
6955 }
6956
6957 conn->feat_mask = 0;
6958
6959 if (hcon->type == ACL_LINK)
6960 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6961 &hcon->hdev->dev_flags);
6962
Marcel Holtmann5a54e7c2014-07-13 20:50:15 +02006963 mutex_init(&conn->ident_lock);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006964 mutex_init(&conn->chan_lock);
6965
6966 INIT_LIST_HEAD(&conn->chan_l);
6967 INIT_LIST_HEAD(&conn->users);
6968
Johan Hedberg276d8072014-08-11 22:06:41 +03006969 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006970
Johan Hedberg61a939c2014-01-17 20:45:11 +02006971 skb_queue_head_init(&conn->pending_rx);
6972 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
Johan Hedbergf3d82d02014-09-05 22:19:50 +03006973 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
Johan Hedberg61a939c2014-01-17 20:45:11 +02006974
Johan Hedberg162b49e2014-01-17 20:45:10 +02006975 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
6976
6977 return conn;
6978}
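
/* Illustrative sketch (not a helper in this file): the initial MTU
 * selection from the switch above, pulled out on its own.  Only LE
 * links with a dedicated LE buffer size use le_mtu; everything else
 * falls back to the ACL MTU.
 */
static inline u16 example_initial_conn_mtu(struct hci_conn *hcon)
{
	if (hcon->type == LE_LINK && hcon->hdev->le_mtu)
		return hcon->hdev->le_mtu;

	return hcon->hdev->acl_mtu;
}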
6979
6980static bool is_valid_psm(u16 psm, u8 dst_type)
{
6981 if (!psm)
6982 return false;
6983
6984 if (bdaddr_type_is_le(dst_type))
6985 return (psm <= 0x00ff);
6986
6987 /* PSM must be odd and lsb of upper byte must be 0 */
6988 return ((psm & 0x0101) == 0x0001);
6989}
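
/* A few example values for the check above (illustration only):
 * BR/EDR: 0x0001 (SDP) and 0x1001 are accepted, 0x0002 (even) and
 * 0x0101 (lsb of the upper byte set) are rejected.
 * LE: any PSM in the range 0x0001-0x00ff is accepted.
 */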
6990
6991int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6992 bdaddr_t *dst, u8 dst_type)
6993{
6994 struct l2cap_conn *conn;
6995 struct hci_conn *hcon;
6996 struct hci_dev *hdev;
Johan Hedberg162b49e2014-01-17 20:45:10 +02006997 int err;
6998
6999 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7000 dst_type, __le16_to_cpu(psm));
7001
7002 hdev = hci_get_route(dst, &chan->src);
7003 if (!hdev)
7004 return -EHOSTUNREACH;
7005
7006 hci_dev_lock(hdev);
7007
Johan Hedberg162b49e2014-01-17 20:45:10 +02007008 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7009 chan->chan_type != L2CAP_CHAN_RAW) {
7010 err = -EINVAL;
7011 goto done;
7012 }
7013
Johan Hedberg21626e62014-01-24 10:35:41 +02007014 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7015 err = -EINVAL;
7016 goto done;
7017 }
7018
7019 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
Johan Hedberg162b49e2014-01-17 20:45:10 +02007020 err = -EINVAL;
7021 goto done;
7022 }
7023
7024 switch (chan->mode) {
7025 case L2CAP_MODE_BASIC:
7026 break;
7027 case L2CAP_MODE_LE_FLOWCTL:
7028 l2cap_le_flowctl_init(chan);
7029 break;
7030 case L2CAP_MODE_ERTM:
7031 case L2CAP_MODE_STREAMING:
7032 if (!disable_ertm)
7033 break;
7034 /* fall through */
7035 default:
Johan Hedbergbeb19e42014-07-18 11:15:26 +03007036 err = -EOPNOTSUPP;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007037 goto done;
7038 }
7039
7040 switch (chan->state) {
7041 case BT_CONNECT:
7042 case BT_CONNECT2:
7043 case BT_CONFIG:
7044 /* Already connecting */
7045 err = 0;
7046 goto done;
7047
7048 case BT_CONNECTED:
7049 /* Already connected */
7050 err = -EISCONN;
7051 goto done;
7052
7053 case BT_OPEN:
7054 case BT_BOUND:
7055 /* Can connect */
7056 break;
7057
7058 default:
7059 err = -EBADFD;
7060 goto done;
7061 }
7062
7063 /* Set destination address and psm */
7064 bacpy(&chan->dst, dst);
7065 chan->dst_type = dst_type;
7066
7067 chan->psm = psm;
7068 chan->dcid = cid;
7069
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007070 if (bdaddr_type_is_le(dst_type)) {
Johan Hedberge804d252014-07-16 11:42:28 +03007071 u8 role;
Johan Hedbergcdd62752014-07-07 15:02:28 +03007072
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007073 /* Convert from L2CAP channel address type to HCI address type
7074 */
7075 if (dst_type == BDADDR_LE_PUBLIC)
7076 dst_type = ADDR_LE_DEV_PUBLIC;
7077 else
7078 dst_type = ADDR_LE_DEV_RANDOM;
7079
Johan Hedberge804d252014-07-16 11:42:28 +03007080 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7081 role = HCI_ROLE_SLAVE;
7082 else
7083 role = HCI_ROLE_MASTER;
Johan Hedbergcdd62752014-07-07 15:02:28 +03007084
Andre Guedes04a6c582014-02-26 20:21:44 -03007085 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
Johan Hedberge804d252014-07-16 11:42:28 +03007086 HCI_LE_CONN_TIMEOUT, role);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007087 } else {
Johan Hedbergd93375a2014-07-07 15:02:27 +03007088 u8 auth_type = l2cap_get_auth_type(chan);
Andre Guedes04a6c582014-02-26 20:21:44 -03007089 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007090 }
Johan Hedberg162b49e2014-01-17 20:45:10 +02007091
7092 if (IS_ERR(hcon)) {
7093 err = PTR_ERR(hcon);
7094 goto done;
7095 }
7096
7097 conn = l2cap_conn_add(hcon);
7098 if (!conn) {
7099 hci_conn_drop(hcon);
7100 err = -ENOMEM;
7101 goto done;
7102 }
7103
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007104 mutex_lock(&conn->chan_lock);
7105 l2cap_chan_lock(chan);
7106
Johan Hedberg162b49e2014-01-17 20:45:10 +02007107 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7108 hci_conn_drop(hcon);
7109 err = -EBUSY;
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007110 goto chan_unlock;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007111 }
7112
7113 /* Update source addr of the socket */
7114 bacpy(&chan->src, &hcon->src);
7115 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7116
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007117 __l2cap_chan_add(conn, chan);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007118
7119 /* l2cap_chan_add takes its own ref so we can drop this one */
7120 hci_conn_drop(hcon);
7121
7122 l2cap_state_change(chan, BT_CONNECT);
7123 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7124
Johan Hedberg61202e42014-01-28 15:16:48 -08007125 /* Release chan->sport so that it can be reused by other
7126 * sockets (as it's only used for listening sockets).
7127 */
7128 write_lock(&chan_list_lock);
7129 chan->sport = 0;
7130 write_unlock(&chan_list_lock);
7131
Johan Hedberg162b49e2014-01-17 20:45:10 +02007132 if (hcon->state == BT_CONNECTED) {
7133 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7134 __clear_chan_timer(chan);
Johan Hedberge7cafc42014-07-17 15:35:38 +03007135 if (l2cap_chan_check_security(chan, true))
Johan Hedberg162b49e2014-01-17 20:45:10 +02007136 l2cap_state_change(chan, BT_CONNECTED);
7137 } else
7138 l2cap_do_start(chan);
7139 }
7140
7141 err = 0;
7142
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007143chan_unlock:
Johan Hedberg162b49e2014-01-17 20:45:10 +02007144 l2cap_chan_unlock(chan);
Johan Hedberg02e246ae2014-10-02 10:16:22 +03007145 mutex_unlock(&conn->chan_lock);
7146done:
Johan Hedberg162b49e2014-01-17 20:45:10 +02007147 hci_dev_unlock(hdev);
7148 hci_dev_put(hdev);
7149 return err;
7150}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03007151EXPORT_SYMBOL_GPL(l2cap_chan_connect);
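
/* Hedged usage sketch (an assumption, not taken from an in-tree
 * caller): how an in-kernel user holding an already configured channel
 * might call the exported helper above to open an LE connection
 * oriented channel.  'chan' is assumed to come from l2cap_chan_create()
 * with a valid ops table set elsewhere; PSM 0x0080 is a placeholder.
 */
static inline int example_connect_le_psm(struct l2cap_chan *chan, bdaddr_t *peer)
{
	chan->mode = L2CAP_MODE_LE_FLOWCTL;

	return l2cap_chan_connect(chan, cpu_to_le16(0x0080), 0,
				  peer, BDADDR_LE_PUBLIC);
}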
Johan Hedberg162b49e2014-01-17 20:45:10 +02007152
Linus Torvalds1da177e2005-04-16 15:20:36 -07007153/* ---- L2CAP interface with lower layer (HCI) ---- */
7154
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007155int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007156{
7157 int exact = 0, lm1 = 0, lm2 = 0;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007158 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007159
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007160 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007161
7162 /* Find listening sockets and check their link_mode */
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007163 read_lock(&chan_list_lock);
7164 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007165 if (c->state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007166 continue;
7167
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007168 if (!bacmp(&c->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007169 lm1 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007170 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007171 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007172 exact++;
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007173 } else if (!bacmp(&c->src, BDADDR_ANY)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007174 lm2 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007175 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007176 lm2 |= HCI_LM_MASTER;
7177 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007178 }
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007179 read_unlock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007180
7181 return exact ? lm1 : lm2;
7182}
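
/* Example of the precedence above (illustration only): with one
 * channel listening on the local bdaddr with FLAG_ROLE_SWITCH set and
 * another listening on BDADDR_ANY without it, the exact match wins and
 * the incoming connection is reported as HCI_LM_ACCEPT | HCI_LM_MASTER.
 */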
7183
Johan Hedberge760ec12014-08-07 22:56:47 +03007184/* Find the next fixed channel in BT_LISTEN state, continue iteration
7185 * from an existing channel in the list or from the beginning of the
7186 * global list (by passing NULL as first parameter).
7187 */
7188static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
Johan Hedberg54a1b622014-08-07 22:56:48 +03007189 bdaddr_t *src, u8 link_type)
Johan Hedberge760ec12014-08-07 22:56:47 +03007190{
7191 read_lock(&chan_list_lock);
7192
7193 if (c)
7194 c = list_next_entry(c, global_l);
7195 else
7196 c = list_entry(chan_list.next, typeof(*c), global_l);
7197
7198 list_for_each_entry_from(c, &chan_list, global_l) {
7199 if (c->chan_type != L2CAP_CHAN_FIXED)
7200 continue;
7201 if (c->state != BT_LISTEN)
7202 continue;
7203 if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
7204 continue;
Johan Hedberg54a1b622014-08-07 22:56:48 +03007205 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7206 continue;
7207 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7208 continue;
Johan Hedberge760ec12014-08-07 22:56:47 +03007209
7210 l2cap_chan_hold(c);
7211 read_unlock(&chan_list_lock);
7212 return c;
7213 }
7214
7215 read_unlock(&chan_list_lock);
7216
7217 return NULL;
7218}
7219
Andrei Emeltchenko9e664632012-07-24 16:06:15 +03007220void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007221{
Johan Hedberge760ec12014-08-07 22:56:47 +03007222 struct hci_dev *hdev = hcon->hdev;
Marcel Holtmann01394182006-07-03 10:02:46 +02007223 struct l2cap_conn *conn;
Johan Hedberge760ec12014-08-07 22:56:47 +03007224 struct l2cap_chan *pchan;
7225 u8 dst_type;
Marcel Holtmann01394182006-07-03 10:02:46 +02007226
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007227 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007228
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007229 if (status) {
Joe Perchese1750722011-06-29 18:18:29 -07007230 l2cap_conn_del(hcon, bt_to_errno(status));
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007231 return;
Andrei Emeltchenkoba6fc312012-10-31 15:46:26 +02007232 }
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007233
7234 conn = l2cap_conn_add(hcon);
7235 if (!conn)
7236 return;
7237
Johan Hedberge760ec12014-08-07 22:56:47 +03007238 dst_type = bdaddr_type(hcon, hcon->dst_type);
7239
7240 /* If device is blocked, do not create channels for it */
7241 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7242 return;
7243
7244 /* Find fixed channels and notify them of the new connection. We
7245 * use multiple individual lookups, continuing each time where
7246 * we left off, because the list lock would prevent calling the
7247 * potentially sleeping l2cap_chan_lock() function.
7248 */
Johan Hedberg54a1b622014-08-07 22:56:48 +03007249 pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
Johan Hedberge760ec12014-08-07 22:56:47 +03007250 while (pchan) {
7251 struct l2cap_chan *chan, *next;
7252
7253 /* Client fixed channels should override server ones */
7254 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7255 goto next;
7256
7257 l2cap_chan_lock(pchan);
7258 chan = pchan->ops->new_connection(pchan);
7259 if (chan) {
7260 bacpy(&chan->src, &hcon->src);
7261 bacpy(&chan->dst, &hcon->dst);
7262 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7263 chan->dst_type = dst_type;
7264
7265 __l2cap_chan_add(conn, chan);
7266 }
7267
7268 l2cap_chan_unlock(pchan);
7269next:
Johan Hedberg54a1b622014-08-07 22:56:48 +03007270 next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
7271 hcon->type);
Johan Hedberge760ec12014-08-07 22:56:47 +03007272 l2cap_chan_put(pchan);
7273 pchan = next;
7274 }
7275
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007276 l2cap_conn_ready(conn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007277}
7278
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007279int l2cap_disconn_ind(struct hci_conn *hcon)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007280{
7281 struct l2cap_conn *conn = hcon->l2cap_data;
7282
7283 BT_DBG("hcon %p", hcon);
7284
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007285 if (!conn)
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02007286 return HCI_ERROR_REMOTE_USER_TERM;
Marcel Holtmann2950f212009-02-12 14:02:50 +01007287 return conn->disc_reason;
7288}
7289
Andrei Emeltchenko9e664632012-07-24 16:06:15 +03007290void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007291{
7292 BT_DBG("hcon %p reason %d", hcon, reason);
7293
Joe Perchese1750722011-06-29 18:18:29 -07007294 l2cap_conn_del(hcon, bt_to_errno(reason));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007295}
7296
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007297static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007298{
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03007299 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007300 return;
7301
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007302 if (encrypt == 0x00) {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007303 if (chan->sec_level == BT_SECURITY_MEDIUM) {
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007304 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
Marcel Holtmann7d513e92014-01-15 22:37:40 -08007305 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7306 chan->sec_level == BT_SECURITY_FIPS)
Gustavo F. Padovan0f852722011-05-04 19:42:50 -03007307 l2cap_chan_close(chan, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007308 } else {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007309 if (chan->sec_level == BT_SECURITY_MEDIUM)
Gustavo F. Padovanc9b66672011-05-17 14:59:01 -03007310 __clear_chan_timer(chan);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007311 }
7312}
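
/* Behaviour of the helper above in short (illustration only): losing
 * encryption on a BT_SECURITY_MEDIUM channel arms the encryption
 * timer, on a HIGH or FIPS channel it closes the channel with
 * ECONNREFUSED; regaining encryption on a MEDIUM channel just clears
 * the timer again.
 */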
7313
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007314int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007315{
Marcel Holtmann40be4922008-07-14 20:13:50 +02007316 struct l2cap_conn *conn = hcon->l2cap_data;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03007317 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007318
Marcel Holtmann01394182006-07-03 10:02:46 +02007319 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007320 return 0;
Marcel Holtmann01394182006-07-03 10:02:46 +02007321
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007322 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007323
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007324 mutex_lock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007325
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007326 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007327 l2cap_chan_lock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007328
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007329 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7330 state_to_string(chan->state));
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007331
Johan Hedberg2338a7e2014-01-24 10:35:40 +02007332 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko78eb2f92012-07-19 17:03:47 +03007333 l2cap_chan_unlock(chan);
7334 continue;
7335 }
7336
Johan Hedberg191eb392014-08-07 22:56:45 +03007337 if (!status && encrypt)
7338 chan->sec_level = hcon->sec_level;
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007339
Andrei Emeltchenko96eff462012-11-15 18:14:53 +02007340 if (!__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007341 l2cap_chan_unlock(chan);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01007342 continue;
7343 }
7344
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007345 if (!status && (chan->state == BT_CONNECTED ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01007346 chan->state == BT_CONFIG)) {
Marcel Holtmannd97c8992013-10-14 02:53:54 -07007347 chan->ops->resume(chan);
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007348 l2cap_check_encryption(chan, encrypt);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007349 l2cap_chan_unlock(chan);
Marcel Holtmann9719f8a2008-07-14 20:13:45 +02007350 continue;
7351 }
7352
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007353 if (chan->state == BT_CONNECT) {
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007354 if (!status)
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03007355 l2cap_start_connection(chan);
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007356 else
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007357 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergfa37c1a2014-11-13 10:55:17 +02007358 } else if (chan->state == BT_CONNECT2 &&
7359 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007360 struct l2cap_conn_rsp rsp;
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007361 __u16 res, stat;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007362
7363 if (!status) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07007364 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007365 res = L2CAP_CR_PEND;
7366 stat = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08007367 chan->ops->defer(chan);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007368 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007369 l2cap_state_change(chan, BT_CONFIG);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007370 res = L2CAP_CR_SUCCESS;
7371 stat = L2CAP_CS_NO_INFO;
7372 }
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007373 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007374 l2cap_state_change(chan, BT_DISCONN);
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007375 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007376 res = L2CAP_CR_SEC_BLOCK;
7377 stat = L2CAP_CS_NO_INFO;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007378 }
7379
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03007380 rsp.scid = cpu_to_le16(chan->dcid);
7381 rsp.dcid = cpu_to_le16(chan->scid);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007382 rsp.result = cpu_to_le16(res);
7383 rsp.status = cpu_to_le16(stat);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03007384 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01007385 sizeof(rsp), &rsp);
Mat Martineau2d369352012-05-23 14:59:30 -07007386
7387 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7388 res == L2CAP_CR_SUCCESS) {
7389 char buf[128];
7390 set_bit(CONF_REQ_SENT, &chan->conf_state);
7391 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7392 L2CAP_CONF_REQ,
7393 l2cap_build_conf_req(chan, buf),
7394 buf);
7395 chan->num_conf_req++;
7396 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007397 }
7398
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007399 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007400 }
7401
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007402 mutex_unlock(&conn->chan_lock);
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007403
Linus Torvalds1da177e2005-04-16 15:20:36 -07007404 return 0;
7405}
7406
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007407int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007408{
7409 struct l2cap_conn *conn = hcon->l2cap_data;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007410 struct l2cap_hdr *hdr;
7411 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007412
Andrei Emeltchenko1d13a252012-10-15 11:58:41 +03007413 /* For AMP controller do not create l2cap conn */
7414 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7415 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007416
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007417 if (!conn)
Claudio Takahasibaf43252013-04-11 13:55:50 -03007418 conn = l2cap_conn_add(hcon);
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007419
7420 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007421 goto drop;
7422
7423 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7424
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007425 switch (flags) {
7426 case ACL_START:
7427 case ACL_START_NO_FLUSH:
7428 case ACL_COMPLETE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007429 if (conn->rx_len) {
7430 BT_ERR("Unexpected start frame (len %d)", skb->len);
7431 kfree_skb(conn->rx_skb);
7432 conn->rx_skb = NULL;
7433 conn->rx_len = 0;
7434 l2cap_conn_unreliable(conn, ECOMM);
7435 }
7436
Andrei Emeltchenkoaae7fe22010-09-15 14:28:43 +03007437 /* Start fragment always begin with Basic L2CAP header */
7438 if (skb->len < L2CAP_HDR_SIZE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007439 BT_ERR("Frame is too short (len %d)", skb->len);
7440 l2cap_conn_unreliable(conn, ECOMM);
7441 goto drop;
7442 }
7443
7444 hdr = (struct l2cap_hdr *) skb->data;
7445 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7446
7447 if (len == skb->len) {
7448 /* Complete frame received */
7449 l2cap_recv_frame(conn, skb);
7450 return 0;
7451 }
7452
7453 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7454
7455 if (skb->len > len) {
7456 BT_ERR("Frame is too long (len %d, expected len %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007457 skb->len, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007458 l2cap_conn_unreliable(conn, ECOMM);
7459 goto drop;
7460 }
7461
7462 /* Allocate skb for the complete frame (with header) */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03007463 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03007464 if (!conn->rx_skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007465 goto drop;
7466
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007467 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007468 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007469 conn->rx_len = len - skb->len;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007470 break;
7471
7472 case ACL_CONT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007473 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7474
7475 if (!conn->rx_len) {
7476 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7477 l2cap_conn_unreliable(conn, ECOMM);
7478 goto drop;
7479 }
7480
7481 if (skb->len > conn->rx_len) {
7482 BT_ERR("Fragment is too long (len %d, expected %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007483 skb->len, conn->rx_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007484 kfree_skb(conn->rx_skb);
7485 conn->rx_skb = NULL;
7486 conn->rx_len = 0;
7487 l2cap_conn_unreliable(conn, ECOMM);
7488 goto drop;
7489 }
7490
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007491 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007492 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007493 conn->rx_len -= skb->len;
7494
7495 if (!conn->rx_len) {
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007496 /* Complete frame received. l2cap_recv_frame
7497 * takes ownership of the skb so set the global
7498 * rx_skb pointer to NULL first.
7499 */
7500 struct sk_buff *rx_skb = conn->rx_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007501 conn->rx_skb = NULL;
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007502 l2cap_recv_frame(conn, rx_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007503 }
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007504 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007505 }
7506
7507drop:
7508 kfree_skb(skb);
7509 return 0;
7510}
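
/* Illustrative sketch, not part of the original receive path: how the
 * expected total frame length is derived from a start fragment,
 * mirroring the rx_len bookkeeping above.  Returns 0 if the fragment
 * cannot even hold the basic header.
 */
static inline u32 example_expected_frame_len(const struct sk_buff *skb)
{
	const struct l2cap_hdr *hdr;

	if (skb->len < L2CAP_HDR_SIZE)
		return 0;

	hdr = (const struct l2cap_hdr *) skb->data;

	/* The basic header length field counts only the payload, so the
	 * complete frame is that value plus the 4-byte header.
	 */
	return __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
}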
7511
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007512static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007513{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007514 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007515
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007516 read_lock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007517
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007518 list_for_each_entry(c, &chan_list, global_l) {
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007519 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007520 &c->src, &c->dst,
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007521 c->state, __le16_to_cpu(c->psm),
7522 c->scid, c->dcid, c->imtu, c->omtu,
7523 c->sec_level, c->mode);
Andrei Emeltchenko61e1b4b2012-01-19 11:19:50 +02007524 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007525
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007526 read_unlock(&chan_list_lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007527
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007528 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007529}
7530
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007531static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7532{
7533 return single_open(file, l2cap_debugfs_show, inode->i_private);
7534}
7535
7536static const struct file_operations l2cap_debugfs_fops = {
7537 .open = l2cap_debugfs_open,
7538 .read = seq_read,
7539 .llseek = seq_lseek,
7540 .release = single_release,
7541};
7542
7543static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007544
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007545int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007546{
7547 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007548
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007549 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007550 if (err < 0)
7551 return err;
7552
Marcel Holtmann1120e4b2013-10-17 17:24:16 -07007553 if (IS_ERR_OR_NULL(bt_debugfs))
7554 return 0;
7555
7556 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7557 NULL, &l2cap_debugfs_fops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007558
Samuel Ortiz40b93972014-05-14 17:53:35 +02007559 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007560 &le_max_credits);
Samuel Ortiz40b93972014-05-14 17:53:35 +02007561 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007562 &le_default_mps);
7563
Linus Torvalds1da177e2005-04-16 15:20:36 -07007564 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007565}
7566
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007567void l2cap_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007568{
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007569 debugfs_remove(l2cap_debugfs);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007570 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007571}
7572
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03007573module_param(disable_ertm, bool, 0644);
7574MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");