blob: 11012a50907083baff581241135b357b0857440d
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
Gustavo F. Padovance5706b2010-07-13 11:57:11 -03004 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Gustavo F. Padovan5d8868f2010-07-16 16:18:39 -03005 Copyright (C) 2010 Google Inc.
Gustavo F. Padovan590051d2011-12-18 13:39:33 -02006 Copyright (C) 2011 ProFUSION Embedded Systems
Mat Martineau422e9252012-04-27 16:50:55 -07007 Copyright (c) 2012 Code Aurora Forum. All rights reserved.
Linus Torvalds1da177e2005-04-16 15:20:36 -07008
9 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10
11 This program is free software; you can redistribute it and/or modify
12 it under the terms of the GNU General Public License version 2 as
13 published by the Free Software Foundation;
14
15 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
16 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
18 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090019 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
20 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
21 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070022 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090024 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
25 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070026 SOFTWARE IS DISCLAIMED.
27*/
28
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020029/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <linux/module.h>
32
Marcel Holtmannaef7d972010-03-21 05:27:45 +010033#include <linux/debugfs.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030034#include <linux/crc16.h>
Daniel Borkmanndbb50882016-07-27 11:40:14 -070035#include <linux/filter.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070036
37#include <net/bluetooth/bluetooth.h>
38#include <net/bluetooth/hci_core.h>
39#include <net/bluetooth/l2cap.h>
Marcel Holtmann7ef9fbf2013-10-10 14:54:14 -070040
Marcel Holtmannac4b7232013-10-10 14:54:16 -070041#include "smp.h"
Marcel Holtmann70247282013-10-10 14:54:15 -070042#include "a2mp.h"
Marcel Holtmann7ef9fbf2013-10-10 14:54:14 -070043#include "amp.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
Johan Hedberg0f1bfe42014-01-27 15:11:35 -080045#define LE_FLOWCTL_MAX_CREDITS 65535
46
Mat Martineaud1de6d42012-05-17 20:53:55 -070047bool disable_ertm;
Marcel Holtmannf0709e02007-10-20 13:38:51 +020048
Marcel Holtmann547d1032013-10-12 08:18:19 -070049static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
Linus Torvalds1da177e2005-04-16 15:20:36 -070050
Johannes Bergb5ad8b72011-06-01 08:54:45 +020051static LIST_HEAD(chan_list);
52static DEFINE_RWLOCK(chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070053
Johan Hedbergf15b8ec2013-12-03 15:08:25 +020054static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
55static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
56
Linus Torvalds1da177e2005-04-16 15:20:36 -070057static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +010058 u8 code, u8 ident, u16 dlen, void *data);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -030059static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
Gustavo Padovan2d792812012-10-06 10:07:01 +010060 void *data);
Ben Seri6300c8b2017-09-09 23:15:59 +020061static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +020062static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
Gustavo Padovand6603662012-05-21 13:58:22 -030064static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
Gustavo Padovan2d792812012-10-06 10:07:01 +010065 struct sk_buff_head *skbs, u8 event);
Mat Martineau608bcc62012-05-17 20:53:32 -070066
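/* Map an HCI link type and HCI address type to the bdaddr types exposed on
 * L2CAP channels: LE links yield BDADDR_LE_PUBLIC or BDADDR_LE_RANDOM,
 * everything else is reported as BDADDR_BREDR.
 */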
Johan Hedberga250e042015-01-15 13:06:44 +020067static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
Marcel Holtmann4f1654e2013-10-13 08:50:41 -070068{
Johan Hedberga250e042015-01-15 13:06:44 +020069 if (link_type == LE_LINK) {
70 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
Marcel Holtmann4f1654e2013-10-13 08:50:41 -070071 return BDADDR_LE_PUBLIC;
72 else
73 return BDADDR_LE_RANDOM;
74 }
75
76 return BDADDR_BREDR;
77}
78
Johan Hedberga250e042015-01-15 13:06:44 +020079static inline u8 bdaddr_src_type(struct hci_conn *hcon)
80{
81 return bdaddr_type(hcon->type, hcon->src_type);
82}
83
84static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
85{
86 return bdaddr_type(hcon->type, hcon->dst_type);
87}
88
Marcel Holtmann01394182006-07-03 10:02:46 +020089/* ---- L2CAP channels ---- */
Gustavo F. Padovan71ba0e52011-05-17 14:34:52 -030090
Gustavo Padovan2d792812012-10-06 10:07:01 +010091static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
92 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +020093{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +020094 struct l2cap_chan *c;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -030095
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +020096 list_for_each_entry(c, &conn->chan_l, list) {
97 if (c->dcid == cid)
98 return c;
Marcel Holtmann01394182006-07-03 10:02:46 +020099 }
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200100 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200101}
102
Gustavo Padovan2d792812012-10-06 10:07:01 +0100103static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
104 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200105{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200106 struct l2cap_chan *c;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300107
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200108 list_for_each_entry(c, &conn->chan_l, list) {
109 if (c->scid == cid)
110 return c;
Marcel Holtmann01394182006-07-03 10:02:46 +0200111 }
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200112 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200113}
114
115/* Find channel with given SCID.
Mat Martineauef191ad2012-05-02 09:42:00 -0700116 * Returns locked channel. */
Gustavo Padovan2d792812012-10-06 10:07:01 +0100117static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
118 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200119{
Gustavo F. Padovan48454072011-03-25 00:22:30 -0300120 struct l2cap_chan *c;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300121
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200122 mutex_lock(&conn->chan_lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300123 c = __l2cap_get_chan_by_scid(conn, cid);
Mat Martineauef191ad2012-05-02 09:42:00 -0700124 if (c)
125 l2cap_chan_lock(c);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200126 mutex_unlock(&conn->chan_lock);
127
Gustavo F. Padovan48454072011-03-25 00:22:30 -0300128 return c;
Marcel Holtmann01394182006-07-03 10:02:46 +0200129}
130
Mat Martineaub1a130b2012-10-23 15:24:09 -0700131/* Find channel with given DCID.
132 * Returns locked channel.
133 */
134static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
135 u16 cid)
136{
137 struct l2cap_chan *c;
138
139 mutex_lock(&conn->chan_lock);
140 c = __l2cap_get_chan_by_dcid(conn, cid);
141 if (c)
142 l2cap_chan_lock(c);
143 mutex_unlock(&conn->chan_lock);
144
145 return c;
146}
147
Gustavo Padovan2d792812012-10-06 10:07:01 +0100148static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
149 u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200150{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200151 struct l2cap_chan *c;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300152
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200153 list_for_each_entry(c, &conn->chan_l, list) {
154 if (c->ident == ident)
155 return c;
Marcel Holtmann01394182006-07-03 10:02:46 +0200156 }
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200157 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200158}
159
Mat Martineau5b155ef2012-10-23 15:24:14 -0700160static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
161 u8 ident)
162{
163 struct l2cap_chan *c;
164
165 mutex_lock(&conn->chan_lock);
166 c = __l2cap_get_chan_by_ident(conn, ident);
167 if (c)
168 l2cap_chan_lock(c);
169 mutex_unlock(&conn->chan_lock);
170
171 return c;
172}
173
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300174static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300175{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300176 struct l2cap_chan *c;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300177
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300178 list_for_each_entry(c, &chan_list, global_l) {
Marcel Holtmann7eafc592013-10-13 08:12:47 -0700179 if (c->sport == psm && !bacmp(&c->src, src))
Szymon Janc250938c2011-11-16 09:32:22 +0100180 return c;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300181 }
Szymon Janc250938c2011-11-16 09:32:22 +0100182 return NULL;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300183}
184
185int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
186{
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300187 int err;
188
Gustavo F. Padovan333055f2011-12-22 15:14:39 -0200189 write_lock(&chan_list_lock);
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300190
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300191 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300192 err = -EADDRINUSE;
193 goto done;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300194 }
195
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300196 if (psm) {
197 chan->psm = psm;
198 chan->sport = psm;
199 err = 0;
200 } else {
Johan Hedberg92594a52016-01-26 17:19:10 -0500201 u16 p, start, end, incr;
202
203 if (chan->src_type == BDADDR_BREDR) {
204 start = L2CAP_PSM_DYN_START;
205 end = L2CAP_PSM_AUTO_END;
206 incr = 2;
207 } else {
208 start = L2CAP_PSM_LE_DYN_START;
209 end = L2CAP_PSM_LE_DYN_END;
210 incr = 1;
211 }
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300212
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300213 err = -EINVAL;
Johan Hedberg92594a52016-01-26 17:19:10 -0500214 for (p = start; p <= end; p += incr)
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300215 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300216 chan->psm = cpu_to_le16(p);
217 chan->sport = cpu_to_le16(p);
218 err = 0;
219 break;
220 }
221 }
222
223done:
Gustavo F. Padovan333055f2011-12-22 15:14:39 -0200224 write_unlock(&chan_list_lock);
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300225 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300226}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300227EXPORT_SYMBOL_GPL(l2cap_add_psm);
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300228
229int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
230{
Gustavo F. Padovan333055f2011-12-22 15:14:39 -0200231 write_lock(&chan_list_lock);
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300232
Johan Hedberg14824302014-08-07 22:56:50 +0300233 /* Override the defaults (which are for conn-oriented) */
234 chan->omtu = L2CAP_DEFAULT_MTU;
235 chan->chan_type = L2CAP_CHAN_FIXED;
236
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300237 chan->scid = scid;
238
Gustavo F. Padovan333055f2011-12-22 15:14:39 -0200239 write_unlock(&chan_list_lock);
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300240
241 return 0;
242}
243
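/* Pick the first unused dynamic source CID on this connection. The upper
 * bound of the scan depends on the link type (LE has a smaller dynamic
 * range); returns 0 if the whole range is already in use.
 */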
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300244static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
Marcel Holtmann01394182006-07-03 10:02:46 +0200245{
Johan Hedberge77af752013-10-08 10:31:00 +0200246 u16 cid, dyn_end;
Marcel Holtmann01394182006-07-03 10:02:46 +0200247
Johan Hedberge77af752013-10-08 10:31:00 +0200248 if (conn->hcon->type == LE_LINK)
249 dyn_end = L2CAP_CID_LE_DYN_END;
250 else
251 dyn_end = L2CAP_CID_DYN_END;
252
Johan Hedbergab0c1272015-11-02 14:39:16 +0200253 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300254 if (!__l2cap_get_chan_by_scid(conn, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200255 return cid;
256 }
257
258 return 0;
259}
260
Gustavo Padovanf93fa272013-10-21 14:21:40 -0200261static void l2cap_state_change(struct l2cap_chan *chan, int state)
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -0300262{
Andrei Emeltchenko42d2d872012-02-17 11:40:57 +0200263 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
Gustavo Padovan2d792812012-10-06 10:07:01 +0100264 state_to_string(state));
Gustavo F. Padovanbadaaa02011-11-23 20:11:46 -0200265
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -0300266 chan->state = state;
Gustavo Padovan53f52122013-10-15 19:24:45 -0300267 chan->ops->state_change(chan, state, 0);
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -0300268}
269
Gustavo Padovanf8e73012013-10-15 19:24:46 -0300270static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
271 int state, int err)
Andrei Emeltchenko2e0052e2012-02-21 12:54:58 +0200272{
Gustavo Padovanf8e73012013-10-15 19:24:46 -0300273 chan->state = state;
Gustavo Padovan53f52122013-10-15 19:24:45 -0300274 chan->ops->state_change(chan, chan->state, err);
Andrei Emeltchenko2e0052e2012-02-21 12:54:58 +0200275}
276
277static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
278{
Gustavo Padovanf8e73012013-10-15 19:24:46 -0300279 chan->ops->state_change(chan, chan->state, err);
Andrei Emeltchenko2e0052e2012-02-21 12:54:58 +0200280}
281
Mat Martineau4239d162012-05-17 20:53:49 -0700282static void __set_retrans_timer(struct l2cap_chan *chan)
283{
284 if (!delayed_work_pending(&chan->monitor_timer) &&
285 chan->retrans_timeout) {
286 l2cap_set_timer(chan, &chan->retrans_timer,
287 msecs_to_jiffies(chan->retrans_timeout));
288 }
289}
290
291static void __set_monitor_timer(struct l2cap_chan *chan)
292{
293 __clear_retrans_timer(chan);
294 if (chan->monitor_timeout) {
295 l2cap_set_timer(chan, &chan->monitor_timer,
296 msecs_to_jiffies(chan->monitor_timeout));
297 }
298}
299
Mat Martineau608bcc62012-05-17 20:53:32 -0700300static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
301 u16 seq)
302{
303 struct sk_buff *skb;
304
305 skb_queue_walk(head, skb) {
Johan Hedberga4368ff2015-03-30 23:21:01 +0300306 if (bt_cb(skb)->l2cap.txseq == seq)
Mat Martineau608bcc62012-05-17 20:53:32 -0700307 return skb;
308 }
309
310 return NULL;
311}
312
Mat Martineau3c588192012-04-11 10:48:42 -0700313/* ---- L2CAP sequence number lists ---- */
314
315/* For ERTM, ordered lists of sequence numbers must be tracked for
316 * SREJ requests that are received and for frames that are to be
317 * retransmitted. These seq_list functions implement a singly-linked
318 * list in an array, where membership in the list can also be checked
319 * in constant time. Items can also be added to the tail of the list
320 * and removed from the head in constant time, without further memory
321 * allocs or frees.
322 */
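/* Editorial illustration (not part of the original source): with mask = 7,
 * appending 3, 10 and 5 to an empty list gives head = 3, tail = 5 and
 *   list[3] = 10, list[10 & 7] = 5, list[5] = L2CAP_SEQ_LIST_TAIL,
 * so pops return 3, 10 and 5 in order, and membership is a single lookup
 * of list[seq & mask] against L2CAP_SEQ_LIST_CLEAR.
 */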
323
324static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
325{
326 size_t alloc_size, i;
327
328 /* Allocated size is a power of 2 to map sequence numbers
329 * (which may be up to 14 bits) in to a smaller array that is
330 * sized for the negotiated ERTM transmit windows.
331 */
332 alloc_size = roundup_pow_of_two(size);
333
334 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
335 if (!seq_list->list)
336 return -ENOMEM;
337
338 seq_list->mask = alloc_size - 1;
339 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
340 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
341 for (i = 0; i < alloc_size; i++)
342 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
343
344 return 0;
345}
346
347static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
348{
349 kfree(seq_list->list);
350}
351
352static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
353 u16 seq)
354{
355 /* Constant-time check for list membership */
356 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
357}
358
Mat Martineau3c588192012-04-11 10:48:42 -0700359static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
360{
Johan Hedberg03a0c5d2014-01-18 21:32:59 +0200361 u16 seq = seq_list->head;
362 u16 mask = seq_list->mask;
363
364 seq_list->head = seq_list->list[seq & mask];
365 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
366
367 if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
368 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
369 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
370 }
371
372 return seq;
Mat Martineau3c588192012-04-11 10:48:42 -0700373}
374
375static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
376{
Gustavo Padovanf522ae32012-05-09 18:28:00 -0300377 u16 i;
Mat Martineau3c588192012-04-11 10:48:42 -0700378
Gustavo Padovanf522ae32012-05-09 18:28:00 -0300379 if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
380 return;
381
382 for (i = 0; i <= seq_list->mask; i++)
383 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
384
385 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
386 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
Mat Martineau3c588192012-04-11 10:48:42 -0700387}
388
389static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
390{
391 u16 mask = seq_list->mask;
392
393 /* All appends happen in constant time */
394
Gustavo Padovanf522ae32012-05-09 18:28:00 -0300395 if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
396 return;
Mat Martineau3c588192012-04-11 10:48:42 -0700397
Gustavo Padovanf522ae32012-05-09 18:28:00 -0300398 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
399 seq_list->head = seq;
400 else
401 seq_list->list[seq_list->tail & mask] = seq;
402
403 seq_list->tail = seq;
404 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
Mat Martineau3c588192012-04-11 10:48:42 -0700405}
406
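/* Delayed work run when the channel timer (chan_timer) expires: pick
 * ECONNREFUSED or ETIMEDOUT based on the current channel state, close the
 * channel through l2cap_chan_close() and ops->close(), and drop the
 * reference held on behalf of the timer.
 */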
Gustavo F. Padovan721c4182011-06-23 19:29:58 -0300407static void l2cap_chan_timeout(struct work_struct *work)
Gustavo F. Padovanab078012011-05-02 18:25:01 -0300408{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -0300409 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Gustavo Padovan2d792812012-10-06 10:07:01 +0100410 chan_timer.work);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200411 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovanab078012011-05-02 18:25:01 -0300412 int reason;
413
Andrei Emeltchenkoe05dcc32012-02-17 11:40:56 +0200414 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
Gustavo F. Padovanab078012011-05-02 18:25:01 -0300415
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200416 mutex_lock(&conn->chan_lock);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +0200417 l2cap_chan_lock(chan);
Gustavo F. Padovanab078012011-05-02 18:25:01 -0300418
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -0300419 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
Gustavo F. Padovanab078012011-05-02 18:25:01 -0300420 reason = ECONNREFUSED;
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -0300421 else if (chan->state == BT_CONNECT &&
Gustavo Padovan2d792812012-10-06 10:07:01 +0100422 chan->sec_level != BT_SECURITY_SDP)
Gustavo F. Padovanab078012011-05-02 18:25:01 -0300423 reason = ECONNREFUSED;
424 else
425 reason = ETIMEDOUT;
426
Gustavo F. Padovan0f852722011-05-04 19:42:50 -0300427 l2cap_chan_close(chan, reason);
Gustavo F. Padovanab078012011-05-02 18:25:01 -0300428
Andrei Emeltchenko6be36552012-02-22 17:11:56 +0200429 l2cap_chan_unlock(chan);
Gustavo F. Padovanab078012011-05-02 18:25:01 -0300430
Gustavo Padovan80b98022012-05-27 22:27:51 -0300431 chan->ops->close(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200432 mutex_unlock(&conn->chan_lock);
433
Ulisses Furquim371fd832011-12-21 20:02:36 -0200434 l2cap_chan_put(chan);
Gustavo F. Padovanab078012011-05-02 18:25:01 -0300435}
436
Gustavo Padovaneef1d9b2012-03-25 13:59:16 -0300437struct l2cap_chan *l2cap_chan_create(void)
Marcel Holtmann01394182006-07-03 10:02:46 +0200438{
Gustavo F. Padovan48454072011-03-25 00:22:30 -0300439 struct l2cap_chan *chan;
Marcel Holtmann01394182006-07-03 10:02:46 +0200440
Gustavo F. Padovan48454072011-03-25 00:22:30 -0300441 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
442 if (!chan)
443 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200444
Andrei Emeltchenkoc03b3552012-02-21 12:54:56 +0200445 mutex_init(&chan->lock);
446
Johan Hedbergff714112014-11-13 09:46:04 +0200447 /* Set default lock nesting level */
448 atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);
449
Gustavo F. Padovan333055f2011-12-22 15:14:39 -0200450 write_lock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300451 list_add(&chan->global_l, &chan_list);
Gustavo F. Padovan333055f2011-12-22 15:14:39 -0200452 write_unlock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300453
Gustavo F. Padovan721c4182011-06-23 19:29:58 -0300454 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
Gustavo F. Padovanab078012011-05-02 18:25:01 -0300455
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -0300456 chan->state = BT_OPEN;
457
Syam Sidhardhan144ad332012-07-27 23:51:21 +0530458 kref_init(&chan->kref);
Gustavo F. Padovan71ba0e52011-05-17 14:34:52 -0300459
Mat Martineau28270112012-05-17 21:14:09 -0700460 /* This flag is cleared in l2cap_chan_ready() */
461 set_bit(CONF_NOT_COMPLETE, &chan->conf_state);
462
Gustavo Padovaneef1d9b2012-03-25 13:59:16 -0300463 BT_DBG("chan %p", chan);
Szymon Jancabc545b2011-11-03 16:05:44 +0100464
Gustavo F. Padovan48454072011-03-25 00:22:30 -0300465 return chan;
Marcel Holtmann01394182006-07-03 10:02:46 +0200466}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300467EXPORT_SYMBOL_GPL(l2cap_chan_create);
Marcel Holtmann01394182006-07-03 10:02:46 +0200468
Syam Sidhardhan144ad332012-07-27 23:51:21 +0530469static void l2cap_chan_destroy(struct kref *kref)
Gustavo F. Padovan6ff5abb2011-04-25 15:10:41 -0300470{
Syam Sidhardhan144ad332012-07-27 23:51:21 +0530471 struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);
472
Jaganath Kanakkassery4af66c62012-07-13 18:17:55 +0530473 BT_DBG("chan %p", chan);
474
Gustavo F. Padovan333055f2011-12-22 15:14:39 -0200475 write_lock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300476 list_del(&chan->global_l);
Gustavo F. Padovan333055f2011-12-22 15:14:39 -0200477 write_unlock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300478
Jaganath Kanakkassery4af66c62012-07-13 18:17:55 +0530479 kfree(chan);
Gustavo F. Padovan6ff5abb2011-04-25 15:10:41 -0300480}
481
Jaganath Kanakkassery30648372012-07-13 18:17:54 +0530482void l2cap_chan_hold(struct l2cap_chan *c)
483{
Syam Sidhardhan144ad332012-07-27 23:51:21 +0530484 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
Jaganath Kanakkassery30648372012-07-13 18:17:54 +0530485
Syam Sidhardhan144ad332012-07-27 23:51:21 +0530486 kref_get(&c->kref);
Jaganath Kanakkassery30648372012-07-13 18:17:54 +0530487}
488
489void l2cap_chan_put(struct l2cap_chan *c)
490{
Syam Sidhardhan144ad332012-07-27 23:51:21 +0530491 BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));
Jaganath Kanakkassery30648372012-07-13 18:17:54 +0530492
Syam Sidhardhan144ad332012-07-27 23:51:21 +0530493 kref_put(&c->kref, l2cap_chan_destroy);
Jaganath Kanakkassery30648372012-07-13 18:17:54 +0530494}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300495EXPORT_SYMBOL_GPL(l2cap_chan_put);
Jaganath Kanakkassery30648372012-07-13 18:17:54 +0530496
Andrei Emeltchenkobd4b1652012-03-28 16:31:25 +0300497void l2cap_chan_set_defaults(struct l2cap_chan *chan)
498{
499 chan->fcs = L2CAP_FCS_CRC16;
500 chan->max_tx = L2CAP_DEFAULT_MAX_TX;
501 chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
502 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
Jukka Rissanen6a5e8162014-05-28 14:43:04 +0300503 chan->remote_max_tx = chan->max_tx;
504 chan->remote_tx_win = chan->tx_win;
Mat Martineauc20f8e32012-07-10 05:47:07 -0700505 chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
Andrei Emeltchenkobd4b1652012-03-28 16:31:25 +0300506 chan->sec_level = BT_SECURITY_LOW;
Jukka Rissanen6a5e8162014-05-28 14:43:04 +0300507 chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
508 chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
509 chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
510 chan->conf_state = 0;
Andrei Emeltchenkobd4b1652012-03-28 16:31:25 +0300511
512 set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
513}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300514EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
Andrei Emeltchenkobd4b1652012-03-28 16:31:25 +0300515
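/* Reset the LE flow-control (credit based) state: discard any partially
 * reassembled SDU, start with zero TX credits (the peer grants them),
 * seed rx_credits from le_max_credits and cap the MPS at both the channel
 * MTU and le_default_mps.
 */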
Johan Hedberg0ce43ce2013-12-05 14:55:33 +0200516static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
Johan Hedberg38319712013-05-17 12:49:23 +0300517{
Johan Hedberg0ce43ce2013-12-05 14:55:33 +0200518 chan->sdu = NULL;
519 chan->sdu_last_frag = NULL;
520 chan->sdu_len = 0;
Johan Hedberg0cd75f72013-05-17 13:09:05 +0300521 chan->tx_credits = 0;
Johan Hedbergf15b8ec2013-12-03 15:08:25 +0200522 chan->rx_credits = le_max_credits;
Johan Hedbergd1d79412014-01-27 15:11:33 -0800523 chan->mps = min_t(u16, chan->imtu, le_default_mps);
Johan Hedberg0ce43ce2013-12-05 14:55:33 +0200524
525 skb_queue_head_init(&chan->tx_q);
Johan Hedberg38319712013-05-17 12:49:23 +0300526}
527
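/* Attach a channel to a connection: assign CIDs and a default MTU
 * according to the channel type (a dynamic SCID for connection-oriented
 * channels, the fixed connectionless/signalling CIDs otherwise), set the
 * default EFS parameters, and take a channel reference plus an hci_conn
 * reference unless this is a fixed channel that did not request one.
 */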
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +0300528void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
Marcel Holtmann01394182006-07-03 10:02:46 +0200529{
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -0300530 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
Andrei Emeltchenko097db762012-03-09 14:16:17 +0200531 __le16_to_cpu(chan->psm), chan->dcid);
Marcel Holtmann01394182006-07-03 10:02:46 +0200532
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +0200533 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
Marcel Holtmann2950f212009-02-12 14:02:50 +0100534
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -0300535 chan->conn = conn;
Marcel Holtmann01394182006-07-03 10:02:46 +0200536
Andrei Emeltchenko54911202012-02-06 15:04:00 +0200537 switch (chan->chan_type) {
538 case L2CAP_CHAN_CONN_ORIENTED:
Johan Hedberg21626e62014-01-24 10:35:41 +0200539 /* Alloc CID for connection-oriented socket */
540 chan->scid = l2cap_alloc_cid(conn);
541 if (conn->hcon->type == ACL_LINK)
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -0300542 chan->omtu = L2CAP_DEFAULT_MTU;
Andrei Emeltchenko54911202012-02-06 15:04:00 +0200543 break;
544
545 case L2CAP_CHAN_CONN_LESS:
Marcel Holtmann01394182006-07-03 10:02:46 +0200546 /* Connectionless socket */
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -0300547 chan->scid = L2CAP_CID_CONN_LESS;
548 chan->dcid = L2CAP_CID_CONN_LESS;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -0300549 chan->omtu = L2CAP_DEFAULT_MTU;
Andrei Emeltchenko54911202012-02-06 15:04:00 +0200550 break;
551
Johan Hedberg2338a7e2014-01-24 10:35:40 +0200552 case L2CAP_CHAN_FIXED:
553 /* Caller will set CID and CID specific MTU values */
Andrei Emeltchenko416fa752012-05-29 13:59:16 +0300554 break;
555
Andrei Emeltchenko54911202012-02-06 15:04:00 +0200556 default:
Marcel Holtmann01394182006-07-03 10:02:46 +0200557 /* Raw socket can send/recv signalling messages only */
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -0300558 chan->scid = L2CAP_CID_SIGNALING;
559 chan->dcid = L2CAP_CID_SIGNALING;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -0300560 chan->omtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann01394182006-07-03 10:02:46 +0200561 }
562
Andrei Emeltchenko8f7975b2011-10-13 16:18:54 +0300563 chan->local_id = L2CAP_BESTEFFORT_ID;
564 chan->local_stype = L2CAP_SERV_BESTEFFORT;
565 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
566 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
567 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
Andrei Emeltchenko8936fa62012-10-08 11:14:41 +0300568 chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;
Andrei Emeltchenko8f7975b2011-10-13 16:18:54 +0300569
Ulisses Furquim371fd832011-12-21 20:02:36 -0200570 l2cap_chan_hold(chan);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300571
Johan Hedbergc16900c2014-08-15 21:17:06 +0300572 /* Only keep a reference for fixed channels if they requested it */
573 if (chan->chan_type != L2CAP_CHAN_FIXED ||
574 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
575 hci_conn_hold(conn->hcon);
Johan Hedberg5ee98912013-04-29 19:35:43 +0300576
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200577 list_add(&chan->list, &conn->chan_l);
Andrei Emeltchenko643162a2012-02-22 17:11:55 +0200578}
579
Andrei Emeltchenko466f8002012-05-29 13:59:01 +0300580void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
Andrei Emeltchenko643162a2012-02-22 17:11:55 +0200581{
582 mutex_lock(&conn->chan_lock);
583 __l2cap_chan_add(conn, chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200584 mutex_unlock(&conn->chan_lock);
Marcel Holtmann01394182006-07-03 10:02:46 +0200585}
586
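/* Detach a channel from its connection: stop the channel timer, run
 * ops->teardown(), unlink it from conn->chan_l, release the hci_conn
 * reference where one was held, and purge the mode specific queues,
 * timers and sequence lists (ERTM, streaming, LE flow control).
 */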
Andrei Emeltchenko466f8002012-05-29 13:59:01 +0300587void l2cap_chan_del(struct l2cap_chan *chan, int err)
Marcel Holtmann01394182006-07-03 10:02:46 +0200588{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -0300589 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann01394182006-07-03 10:02:46 +0200590
Gustavo F. Padovanc9b66672011-05-17 14:59:01 -0300591 __clear_chan_timer(chan);
Marcel Holtmann01394182006-07-03 10:02:46 +0200592
Johan Hedberg49d11742014-11-13 14:37:50 +0200593 BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
594 state_to_string(chan->state));
Marcel Holtmann01394182006-07-03 10:02:46 +0200595
Johan Hedberg72847ce2014-08-08 09:28:03 +0300596 chan->ops->teardown(chan, err);
597
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900598 if (conn) {
Andrei Emeltchenko56f60982012-10-15 11:58:44 +0300599 struct amp_mgr *mgr = conn->hcon->amp_mgr;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300600 /* Delete from channel list */
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200601 list_del(&chan->list);
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -0200602
Ulisses Furquim371fd832011-12-21 20:02:36 -0200603 l2cap_chan_put(chan);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300604
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -0300605 chan->conn = NULL;
Andrei Emeltchenko3cabbfd2012-05-31 11:01:37 +0300606
Johan Hedbergc16900c2014-08-15 21:17:06 +0300607 /* Reference was only held for non-fixed channels or
608 * fixed channels that explicitly requested it using the
609 * FLAG_HOLD_HCI_CONN flag.
610 */
611 if (chan->chan_type != L2CAP_CHAN_FIXED ||
612 test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
David Herrmann76a68ba2013-04-06 20:28:37 +0200613 hci_conn_drop(conn->hcon);
Andrei Emeltchenko56f60982012-10-15 11:58:44 +0300614
615 if (mgr && mgr->bredr_chan == chan)
616 mgr->bredr_chan = NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200617 }
618
Andrei Emeltchenko419e08c2012-10-31 15:46:34 +0200619 if (chan->hs_hchan) {
620 struct hci_chan *hs_hchan = chan->hs_hchan;
621
622 BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
623 amp_disconnect_logical_link(hs_hchan);
624 }
625
Mat Martineau28270112012-05-17 21:14:09 -0700626 if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
Gustavo F. Padovan6ff5abb2011-04-25 15:10:41 -0300627 return;
Gustavo F. Padovan2ead70b2011-04-01 15:13:36 -0300628
Gustavo Padovanee556f62012-05-18 20:22:38 -0300629 switch (chan->mode) {
630 case L2CAP_MODE_BASIC:
631 break;
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300632
Johan Hedberg38319712013-05-17 12:49:23 +0300633 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedberg177f8f22013-05-31 17:54:51 +0300634 skb_queue_purge(&chan->tx_q);
Johan Hedberg38319712013-05-17 12:49:23 +0300635 break;
636
Gustavo Padovanee556f62012-05-18 20:22:38 -0300637 case L2CAP_MODE_ERTM:
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -0300638 __clear_retrans_timer(chan);
639 __clear_monitor_timer(chan);
640 __clear_ack_timer(chan);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300641
Gustavo F. Padovanf1c67752011-03-25 20:36:10 -0300642 skb_queue_purge(&chan->srej_q);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300643
Mat Martineau3c588192012-04-11 10:48:42 -0700644 l2cap_seq_list_free(&chan->srej_list);
645 l2cap_seq_list_free(&chan->retrans_list);
Gustavo Padovanee556f62012-05-18 20:22:38 -0300646
647 /* fall through */
648
649 case L2CAP_MODE_STREAMING:
650 skb_queue_purge(&chan->tx_q);
651 break;
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300652 }
Gustavo Padovanee556f62012-05-18 20:22:38 -0300653
654 return;
Marcel Holtmann01394182006-07-03 10:02:46 +0200655}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300656EXPORT_SYMBOL_GPL(l2cap_chan_del);
Marcel Holtmann01394182006-07-03 10:02:46 +0200657
Johan Hedbergf3d82d02014-09-05 22:19:50 +0300658static void l2cap_conn_update_id_addr(struct work_struct *work)
Johan Hedberg387a33e2014-02-18 21:41:33 +0200659{
Johan Hedbergf3d82d02014-09-05 22:19:50 +0300660 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
661 id_addr_update_work);
662 struct hci_conn *hcon = conn->hcon;
Johan Hedberg387a33e2014-02-18 21:41:33 +0200663 struct l2cap_chan *chan;
664
665 mutex_lock(&conn->chan_lock);
666
667 list_for_each_entry(chan, &conn->chan_l, list) {
668 l2cap_chan_lock(chan);
669 bacpy(&chan->dst, &hcon->dst);
Johan Hedberga250e042015-01-15 13:06:44 +0200670 chan->dst_type = bdaddr_dst_type(hcon);
Johan Hedberg387a33e2014-02-18 21:41:33 +0200671 l2cap_chan_unlock(chan);
672 }
673
674 mutex_unlock(&conn->chan_lock);
675}
676
Johan Hedberg27e2d4c2013-05-14 13:27:21 +0300677static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
678{
679 struct l2cap_conn *conn = chan->conn;
680 struct l2cap_le_conn_rsp rsp;
681 u16 result;
682
683 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
684 result = L2CAP_CR_AUTHORIZATION;
685 else
686 result = L2CAP_CR_BAD_PSM;
687
688 l2cap_state_change(chan, BT_DISCONN);
689
690 rsp.dcid = cpu_to_le16(chan->scid);
691 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +0200692 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +0300693 rsp.credits = cpu_to_le16(chan->rx_credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +0300694 rsp.result = cpu_to_le16(result);
695
696 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
697 &rsp);
698}
699
Johan Hedberg791d60f2013-05-14 22:24:44 +0300700static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
701{
702 struct l2cap_conn *conn = chan->conn;
703 struct l2cap_conn_rsp rsp;
704 u16 result;
705
706 if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
707 result = L2CAP_CR_SEC_BLOCK;
708 else
709 result = L2CAP_CR_BAD_PSM;
710
711 l2cap_state_change(chan, BT_DISCONN);
712
713 rsp.scid = cpu_to_le16(chan->dcid);
714 rsp.dcid = cpu_to_le16(chan->scid);
715 rsp.result = cpu_to_le16(result);
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700716 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Johan Hedberg791d60f2013-05-14 22:24:44 +0300717
718 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
719}
720
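/* Shut a channel down according to its state: listening channels are
 * simply torn down, connected or configuring connection-oriented channels
 * send a disconnect request, and channels still in BT_CONNECT2 reject the
 * pending connection request before being deleted.
 */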
Gustavo F. Padovan0f852722011-05-04 19:42:50 -0300721void l2cap_chan_close(struct l2cap_chan *chan, int reason)
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300722{
723 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300724
Marcel Holtmann7eafc592013-10-13 08:12:47 -0700725 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300726
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -0300727 switch (chan->state) {
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300728 case BT_LISTEN:
Gustavo Padovanb699ec02012-10-06 11:51:54 +0100729 chan->ops->teardown(chan, 0);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300730 break;
731
732 case BT_CONNECTED:
733 case BT_CONFIG:
Johan Hedberg7b25c9b2014-01-28 15:28:04 -0800734 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
Gustavo Padovan8d836d72013-10-15 19:24:47 -0300735 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +0200736 l2cap_send_disconn_req(chan, reason);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300737 } else
738 l2cap_chan_del(chan, reason);
739 break;
740
741 case BT_CONNECT2:
Johan Hedberg791d60f2013-05-14 22:24:44 +0300742 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
743 if (conn->hcon->type == ACL_LINK)
744 l2cap_chan_connect_reject(chan);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +0300745 else if (conn->hcon->type == LE_LINK)
746 l2cap_chan_le_connect_reject(chan);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300747 }
748
749 l2cap_chan_del(chan, reason);
750 break;
751
752 case BT_CONNECT:
753 case BT_DISCONN:
754 l2cap_chan_del(chan, reason);
755 break;
756
757 default:
Gustavo Padovanb699ec02012-10-06 11:51:54 +0100758 chan->ops->teardown(chan, 0);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300759 break;
760 }
761}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +0300762EXPORT_SYMBOL(l2cap_chan_close);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300763
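/* Translate the channel type and requested security level into an HCI
 * authentication requirement (bonding/MITM), raising BT_SECURITY_LOW to
 * BT_SECURITY_SDP for the SDP and 3DSP PSMs.
 */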
Gustavo F. Padovan43434782011-04-12 18:31:57 -0300764static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
Johan Hedberg8556edd32011-01-19 12:06:50 +0530765{
Marcel Holtmann6a974b52013-10-12 07:19:31 -0700766 switch (chan->chan_type) {
767 case L2CAP_CHAN_RAW:
Gustavo F. Padovan43434782011-04-12 18:31:57 -0300768 switch (chan->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530769 case BT_SECURITY_HIGH:
Marcel Holtmann7d513e92014-01-15 22:37:40 -0800770 case BT_SECURITY_FIPS:
Johan Hedberg8556edd32011-01-19 12:06:50 +0530771 return HCI_AT_DEDICATED_BONDING_MITM;
772 case BT_SECURITY_MEDIUM:
773 return HCI_AT_DEDICATED_BONDING;
774 default:
775 return HCI_AT_NO_BONDING;
776 }
Marcel Holtmann6a974b52013-10-12 07:19:31 -0700777 break;
Marcel Holtmann3124b842013-10-12 07:19:32 -0700778 case L2CAP_CHAN_CONN_LESS:
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700779 if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
Marcel Holtmann3124b842013-10-12 07:19:32 -0700780 if (chan->sec_level == BT_SECURITY_LOW)
781 chan->sec_level = BT_SECURITY_SDP;
782 }
Marcel Holtmann7d513e92014-01-15 22:37:40 -0800783 if (chan->sec_level == BT_SECURITY_HIGH ||
784 chan->sec_level == BT_SECURITY_FIPS)
Marcel Holtmann3124b842013-10-12 07:19:32 -0700785 return HCI_AT_NO_BONDING_MITM;
786 else
787 return HCI_AT_NO_BONDING;
788 break;
Marcel Holtmann6a974b52013-10-12 07:19:31 -0700789 case L2CAP_CHAN_CONN_ORIENTED:
Joe Perchesdcf4adb2014-03-12 10:52:35 -0700790 if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
Marcel Holtmann6a974b52013-10-12 07:19:31 -0700791 if (chan->sec_level == BT_SECURITY_LOW)
792 chan->sec_level = BT_SECURITY_SDP;
Johan Hedberg8556edd32011-01-19 12:06:50 +0530793
Marcel Holtmann7d513e92014-01-15 22:37:40 -0800794 if (chan->sec_level == BT_SECURITY_HIGH ||
795 chan->sec_level == BT_SECURITY_FIPS)
Marcel Holtmann6a974b52013-10-12 07:19:31 -0700796 return HCI_AT_NO_BONDING_MITM;
797 else
798 return HCI_AT_NO_BONDING;
799 }
800 /* fall through */
801 default:
Gustavo F. Padovan43434782011-04-12 18:31:57 -0300802 switch (chan->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530803 case BT_SECURITY_HIGH:
Marcel Holtmann7d513e92014-01-15 22:37:40 -0800804 case BT_SECURITY_FIPS:
Johan Hedberg8556edd32011-01-19 12:06:50 +0530805 return HCI_AT_GENERAL_BONDING_MITM;
806 case BT_SECURITY_MEDIUM:
807 return HCI_AT_GENERAL_BONDING;
808 default:
809 return HCI_AT_NO_BONDING;
810 }
Marcel Holtmann6a974b52013-10-12 07:19:31 -0700811 break;
Johan Hedberg8556edd32011-01-19 12:06:50 +0530812 }
813}
814
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200815/* Service level security */
Johan Hedberge7cafc42014-07-17 15:35:38 +0300816int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200817{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -0300818 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100819 __u8 auth_type;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200820
Johan Hedberga17de2f2013-05-14 13:25:37 +0300821 if (conn->hcon->type == LE_LINK)
822 return smp_conn_security(conn->hcon, chan->sec_level);
823
Gustavo F. Padovan43434782011-04-12 18:31:57 -0300824 auth_type = l2cap_get_auth_type(chan);
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100825
Johan Hedberge7cafc42014-07-17 15:35:38 +0300826 return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
827 initiator);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200828}
829
Johannes Bergb5ad8b72011-06-01 08:54:45 +0200830static u8 l2cap_get_ident(struct l2cap_conn *conn)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200831{
832 u8 id;
833
 834 /* Get next available identifier.
 835 * 1 - 128 are used by the kernel.
836 * 129 - 199 are reserved.
837 * 200 - 254 are used by utilities like l2ping, etc.
838 */
839
Marcel Holtmann5a54e7c2014-07-13 20:50:15 +0200840 mutex_lock(&conn->ident_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200841
842 if (++conn->tx_ident > 128)
843 conn->tx_ident = 1;
844
845 id = conn->tx_ident;
846
Marcel Holtmann5a54e7c2014-07-13 20:50:15 +0200847 mutex_unlock(&conn->ident_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200848
849 return id;
850}
851
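/* Build a signalling command with l2cap_build_cmd() and queue it on the
 * connection's HCI channel at maximum priority, using a non-flushable ACL
 * start when the controller supports it or the link is LE.
 */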
Gustavo Padovan2d792812012-10-06 10:07:01 +0100852static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
853 void *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200854{
855 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200856 u8 flags;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200857
858 BT_DBG("code 0x%2.2x", code);
859
860 if (!skb)
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -0300861 return;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200862
Steven Walterf6af6752014-11-19 09:41:17 -0500863 /* Use NO_FLUSH if supported or we have an LE link (which does
864 * not support auto-flushing packets) */
865 if (lmp_no_flush_capable(conn->hcon->hdev) ||
866 conn->hcon->type == LE_LINK)
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200867 flags = ACL_START_NO_FLUSH;
868 else
869 flags = ACL_START;
870
Jaikumar Ganesh14b12d02011-05-23 18:06:04 -0700871 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +0200872 skb->priority = HCI_PRIO_MAX;
Jaikumar Ganesh14b12d02011-05-23 18:06:04 -0700873
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +0200874 hci_send_acl(conn->hchan, skb, flags);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200875}
876
Mat Martineau02b0fbb2012-10-23 15:24:10 -0700877static bool __chan_is_moving(struct l2cap_chan *chan)
878{
879 return chan->move_state != L2CAP_MOVE_STABLE &&
880 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
881}
882
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +0200883static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
884{
885 struct hci_conn *hcon = chan->conn->hcon;
886 u16 flags;
887
888 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
Gustavo Padovan2d792812012-10-06 10:07:01 +0100889 skb->priority);
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +0200890
Mat Martineaud5f8a752012-10-23 15:24:18 -0700891 if (chan->hs_hcon && !__chan_is_moving(chan)) {
892 if (chan->hs_hchan)
893 hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
894 else
895 kfree_skb(skb);
896
897 return;
898 }
899
Steven Walterf6af6752014-11-19 09:41:17 -0500900 /* Use NO_FLUSH for LE links (where this is the only option) or
901 * if the BR/EDR link supports it and flushing has not been
902 * explicitly requested (through FLAG_FLUSHABLE).
903 */
904 if (hcon->type == LE_LINK ||
905 (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
906 lmp_no_flush_capable(hcon->hdev)))
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +0200907 flags = ACL_START_NO_FLUSH;
908 else
909 flags = ACL_START;
910
911 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
912 hci_send_acl(chan->conn->hchan, skb, flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700913}
914
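/* The helpers below convert between the on-air ERTM control field (16-bit
 * enhanced or 32-bit extended, selected by FLAG_EXT_CTRL) and the decoded
 * struct l2cap_ctrl kept in the skb control block.
 */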
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700915static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
916{
917 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
918 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
919
920 if (enh & L2CAP_CTRL_FRAME_TYPE) {
921 /* S-Frame */
922 control->sframe = 1;
923 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
924 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
925
926 control->sar = 0;
927 control->txseq = 0;
928 } else {
929 /* I-Frame */
930 control->sframe = 0;
931 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
932 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
933
934 control->poll = 0;
935 control->super = 0;
936 }
937}
938
939static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
940{
941 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
942 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
943
944 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
945 /* S-Frame */
946 control->sframe = 1;
947 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
948 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
949
950 control->sar = 0;
951 control->txseq = 0;
952 } else {
953 /* I-Frame */
954 control->sframe = 0;
955 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
956 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
957
958 control->poll = 0;
959 control->super = 0;
960 }
961}
962
963static inline void __unpack_control(struct l2cap_chan *chan,
964 struct sk_buff *skb)
965{
966 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
967 __unpack_extended_control(get_unaligned_le32(skb->data),
Johan Hedberga4368ff2015-03-30 23:21:01 +0300968 &bt_cb(skb)->l2cap);
Mat Martineaucec8ab6e2012-05-17 20:53:36 -0700969 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700970 } else {
971 __unpack_enhanced_control(get_unaligned_le16(skb->data),
Johan Hedberga4368ff2015-03-30 23:21:01 +0300972 &bt_cb(skb)->l2cap);
Mat Martineaucec8ab6e2012-05-17 20:53:36 -0700973 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700974 }
975}
976
977static u32 __pack_extended_control(struct l2cap_ctrl *control)
978{
979 u32 packed;
980
981 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
982 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
983
984 if (control->sframe) {
985 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
986 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
987 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
988 } else {
989 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
990 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
991 }
992
993 return packed;
994}
995
996static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
997{
998 u16 packed;
999
1000 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
1001 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
1002
1003 if (control->sframe) {
1004 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
1005 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
1006 packed |= L2CAP_CTRL_FRAME_TYPE;
1007 } else {
1008 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
1009 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
1010 }
1011
1012 return packed;
1013}
1014
1015static inline void __pack_control(struct l2cap_chan *chan,
1016 struct l2cap_ctrl *control,
1017 struct sk_buff *skb)
1018{
1019 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1020 put_unaligned_le32(__pack_extended_control(control),
1021 skb->data + L2CAP_HDR_SIZE);
1022 } else {
1023 put_unaligned_le16(__pack_enhanced_control(control),
1024 skb->data + L2CAP_HDR_SIZE);
1025 }
1026}
1027
Gustavo Padovanba7aa642012-05-29 13:29:16 -03001028static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1029{
1030 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1031 return L2CAP_EXT_HDR_SIZE;
1032 else
1033 return L2CAP_ENH_HDR_SIZE;
1034}
1035
Mat Martineaua67d7f62012-05-17 20:53:35 -07001036static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
1037 u32 control)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001038{
1039 struct sk_buff *skb;
1040 struct l2cap_hdr *lh;
Gustavo Padovanba7aa642012-05-29 13:29:16 -03001041 int hlen = __ertm_hdr_size(chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001042
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001043 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03001044 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001045
Mat Martineaua67d7f62012-05-17 20:53:35 -07001046 skb = bt_skb_alloc(hlen, GFP_KERNEL);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001047
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001048 if (!skb)
Mat Martineaua67d7f62012-05-17 20:53:35 -07001049 return ERR_PTR(-ENOMEM);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001050
1051 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001052 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001053 lh->cid = cpu_to_le16(chan->dcid);
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03001054
Mat Martineaua67d7f62012-05-17 20:53:35 -07001055 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1056 put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
1057 else
1058 put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001059
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001060 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineaua67d7f62012-05-17 20:53:35 -07001061 u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03001062 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001063 }
1064
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001065 skb->priority = HCI_PRIO_MAX;
Mat Martineaua67d7f62012-05-17 20:53:35 -07001066 return skb;
1067}
1068
1069static void l2cap_send_sframe(struct l2cap_chan *chan,
1070 struct l2cap_ctrl *control)
1071{
1072 struct sk_buff *skb;
1073 u32 control_field;
1074
1075 BT_DBG("chan %p, control %p", chan, control);
1076
1077 if (!control->sframe)
1078 return;
1079
Mat Martineaub99e13a2012-10-23 15:24:19 -07001080 if (__chan_is_moving(chan))
1081 return;
1082
Mat Martineaua67d7f62012-05-17 20:53:35 -07001083 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
1084 !control->poll)
1085 control->final = 1;
1086
1087 if (control->super == L2CAP_SUPER_RR)
1088 clear_bit(CONN_RNR_SENT, &chan->conn_state);
1089 else if (control->super == L2CAP_SUPER_RNR)
1090 set_bit(CONN_RNR_SENT, &chan->conn_state);
1091
1092 if (control->super != L2CAP_SUPER_SREJ) {
1093 chan->last_acked_seq = control->reqseq;
1094 __clear_ack_timer(chan);
1095 }
1096
1097 BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
1098 control->final, control->poll, control->super);
1099
1100 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1101 control_field = __pack_extended_control(control);
1102 else
1103 control_field = __pack_enhanced_control(control);
1104
1105 skb = l2cap_create_sframe_pdu(chan, control_field);
1106 if (!IS_ERR(skb))
1107 l2cap_do_send(chan, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001108}
1109
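/* Send a supervisory frame acknowledging up to buffer_seq: RNR when the
 * local side is busy, RR otherwise, with the poll bit set as requested.
 */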
Mat Martineauc9e3d5e2012-05-17 20:53:48 -07001110static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
Gustavo F. Padovan7e743092009-08-26 04:04:03 -03001111{
Mat Martineauc9e3d5e2012-05-17 20:53:48 -07001112 struct l2cap_ctrl control;
Gustavo F. Padovan7e743092009-08-26 04:04:03 -03001113
Mat Martineauc9e3d5e2012-05-17 20:53:48 -07001114 BT_DBG("chan %p, poll %d", chan, poll);
1115
1116 memset(&control, 0, sizeof(control));
1117 control.sframe = 1;
1118 control.poll = poll;
1119
1120 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
1121 control.super = L2CAP_SUPER_RNR;
1122 else
1123 control.super = L2CAP_SUPER_RR;
1124
1125 control.reqseq = chan->buffer_seq;
1126 l2cap_send_sframe(chan, &control);
Gustavo F. Padovan7e743092009-08-26 04:04:03 -03001127}
1128
Gustavo F. Padovanb4450032011-04-12 18:15:09 -03001129static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +03001130{
Johan Hedberg5ff6f342014-08-07 22:56:43 +03001131 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1132 return true;
1133
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001134 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +03001135}
1136
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001137static bool __amp_capable(struct l2cap_chan *chan)
1138{
1139 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann1df7b172013-10-05 11:47:49 -07001140 struct hci_dev *hdev;
1141 bool amp_available = false;
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001142
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02001143 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
Marcel Holtmann1df7b172013-10-05 11:47:49 -07001144 return false;
1145
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02001146 if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
Marcel Holtmann1df7b172013-10-05 11:47:49 -07001147 return false;
1148
1149 read_lock(&hci_dev_list_lock);
1150 list_for_each_entry(hdev, &hci_dev_list, list) {
1151 if (hdev->amp_type != AMP_TYPE_BREDR &&
1152 test_bit(HCI_UP, &hdev->flags)) {
1153 amp_available = true;
1154 break;
1155 }
1156 }
1157 read_unlock(&hci_dev_list_lock);
1158
1159 if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
1160 return amp_available;
Marcel Holtmann848566b2013-10-01 22:59:22 -07001161
1162 return false;
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001163}
1164
Andrei Emeltchenko5ce66b52012-10-31 15:46:30 +02001165static bool l2cap_check_efs(struct l2cap_chan *chan)
1166{
1167 /* Check EFS parameters */
1168 return true;
1169}
1170
Andrei Emeltchenko2766be42012-09-27 17:26:21 +03001171void l2cap_send_conn_req(struct l2cap_chan *chan)
Andrei Emeltchenko9b27f352012-02-24 16:00:00 +02001172{
1173 struct l2cap_conn *conn = chan->conn;
1174 struct l2cap_conn_req req;
1175
1176 req.scid = cpu_to_le16(chan->scid);
1177 req.psm = chan->psm;
1178
1179 chan->ident = l2cap_get_ident(conn);
1180
1181 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
1182
1183 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
1184}
1185
Mat Martineau8eb200b2012-10-23 15:24:17 -07001186static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
1187{
1188 struct l2cap_create_chan_req req;
1189 req.scid = cpu_to_le16(chan->scid);
1190 req.psm = chan->psm;
1191 req.amp_id = amp_id;
1192
1193 chan->ident = l2cap_get_ident(chan->conn);
1194
1195 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
1196 sizeof(req), &req);
1197}
1198
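/* Prepare an ERTM channel for an AMP channel move: stop the retransmit,
 * monitor and ack timers, rewind per-skb retry counters, clear the SREJ
 * and retransmit lists, and park the RX state machine in the MOVE state
 * with the remote treated as busy until the move completes.
 */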
Mat Martineau02b0fbb2012-10-23 15:24:10 -07001199static void l2cap_move_setup(struct l2cap_chan *chan)
1200{
1201 struct sk_buff *skb;
1202
1203 BT_DBG("chan %p", chan);
1204
1205 if (chan->mode != L2CAP_MODE_ERTM)
1206 return;
1207
1208 __clear_retrans_timer(chan);
1209 __clear_monitor_timer(chan);
1210 __clear_ack_timer(chan);
1211
1212 chan->retry_count = 0;
1213 skb_queue_walk(&chan->tx_q, skb) {
Johan Hedberga4368ff2015-03-30 23:21:01 +03001214 if (bt_cb(skb)->l2cap.retries)
1215 bt_cb(skb)->l2cap.retries = 1;
Mat Martineau02b0fbb2012-10-23 15:24:10 -07001216 else
1217 break;
1218 }
1219
1220 chan->expected_tx_seq = chan->buffer_seq;
1221
1222 clear_bit(CONN_REJ_ACT, &chan->conn_state);
1223 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
1224 l2cap_seq_list_clear(&chan->retrans_list);
1225 l2cap_seq_list_clear(&chan->srej_list);
1226 skb_queue_purge(&chan->srej_q);
1227
1228 chan->tx_state = L2CAP_TX_STATE_XMIT;
1229 chan->rx_state = L2CAP_RX_STATE_MOVE;
1230
1231 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
1232}
1233
Mat Martineau5f3847a2012-10-23 15:24:12 -07001234static void l2cap_move_done(struct l2cap_chan *chan)
1235{
1236 u8 move_role = chan->move_role;
1237 BT_DBG("chan %p", chan);
1238
1239 chan->move_state = L2CAP_MOVE_STABLE;
1240 chan->move_role = L2CAP_MOVE_ROLE_NONE;
1241
1242 if (chan->mode != L2CAP_MODE_ERTM)
1243 return;
1244
1245 switch (move_role) {
1246 case L2CAP_MOVE_ROLE_INITIATOR:
1247 l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
1248 chan->rx_state = L2CAP_RX_STATE_WAIT_F;
1249 break;
1250 case L2CAP_MOVE_ROLE_RESPONDER:
1251 chan->rx_state = L2CAP_RX_STATE_WAIT_P;
1252 break;
1253 }
1254}
1255
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001256static void l2cap_chan_ready(struct l2cap_chan *chan)
1257{
Johan Hedberg315917e2015-02-16 11:42:11 +02001258 /* The channel may have already been flagged as connected in
1259 * case of receiving data before the L2CAP info req/rsp
1260 * procedure is complete.
1261 */
1262 if (chan->state == BT_CONNECTED)
1263 return;
1264
Mat Martineau28270112012-05-17 21:14:09 -07001265 /* This clears all conf flags, including CONF_NOT_COMPLETE */
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001266 chan->conf_state = 0;
1267 __clear_chan_timer(chan);
1268
Johan Hedberg0ce43ce2013-12-05 14:55:33 +02001269 if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
1270 chan->ops->suspend(chan);
Johan Hedberg177f8f22013-05-31 17:54:51 +03001271
Andrei Emeltchenko54a59aa2012-05-27 22:27:53 -03001272 chan->state = BT_CONNECTED;
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001273
Andrei Emeltchenkofd83e2c2012-05-30 09:55:32 +03001274 chan->ops->ready(chan);
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001275}
1276
Johan Hedbergf1496de2013-05-13 14:15:56 +03001277static void l2cap_le_connect(struct l2cap_chan *chan)
1278{
1279 struct l2cap_conn *conn = chan->conn;
1280 struct l2cap_le_conn_req req;
1281
Johan Hedberg595177f2013-12-02 22:12:22 +02001282 if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
1283 return;
1284
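	/* The LE credit based connection request advertises our PSM,
	 * source CID, MTU, MPS and initial receive credits.
	 */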
Johan Hedbergf1496de2013-05-13 14:15:56 +03001285 req.psm = chan->psm;
1286 req.scid = cpu_to_le16(chan->scid);
1287 req.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02001288 req.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03001289 req.credits = cpu_to_le16(chan->rx_credits);
Johan Hedbergf1496de2013-05-13 14:15:56 +03001290
1291 chan->ident = l2cap_get_ident(conn);
1292
1293 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
1294 sizeof(req), &req);
1295}
1296
1297static void l2cap_le_start(struct l2cap_chan *chan)
1298{
1299 struct l2cap_conn *conn = chan->conn;
1300
1301 if (!smp_conn_security(conn->hcon, chan->sec_level))
1302 return;
1303
1304 if (!chan->psm) {
1305 l2cap_chan_ready(chan);
1306 return;
1307 }
1308
1309 if (chan->state == BT_CONNECT)
1310 l2cap_le_connect(chan);
1311}
1312
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001313static void l2cap_start_connection(struct l2cap_chan *chan)
1314{
1315 if (__amp_capable(chan)) {
1316 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1317 a2mp_discover_amp(chan);
Johan Hedbergf1496de2013-05-13 14:15:56 +03001318 } else if (chan->conn->hcon->type == LE_LINK) {
1319 l2cap_le_start(chan);
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001320 } else {
1321 l2cap_send_conn_req(chan);
1322 }
1323}
1324
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001325static void l2cap_request_info(struct l2cap_conn *conn)
1326{
1327 struct l2cap_info_req req;
1328
1329 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1330 return;
1331
1332 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
1333
1334 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1335 conn->info_ident = l2cap_get_ident(conn);
1336
1337 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
1338
1339 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1340 sizeof(req), &req);
1341}
1342
Marcel Holtmann68d1e282019-06-22 15:47:01 +02001343static bool l2cap_check_enc_key_size(struct hci_conn *hcon)
1344{
1345 /* The minimum encryption key size needs to be enforced by the
1346 * host stack before establishing any L2CAP connections. The
1347 * specification in theory allows a minimum of 1, but to align
1348 * BR/EDR and LE transports, a minimum of 7 is chosen.
1349 *
1350 * This check might also be called for unencrypted connections
1351 * that have no key size requirements. Ensure that the link is
1352 * actually encrypted before enforcing a key size.
1353 */
1354 return (!test_bit(HCI_CONN_ENCRYPT, &hcon->flags) ||
Matias Karhumaaf4705572019-07-02 16:35:09 +02001355 hcon->enc_key_size >= HCI_MIN_ENC_KEY_SIZE);
Marcel Holtmann68d1e282019-06-22 15:47:01 +02001356}
1357
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001358static void l2cap_do_start(struct l2cap_chan *chan)
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001359{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03001360 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001361
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001362 if (conn->hcon->type == LE_LINK) {
Johan Hedberg96ac34f2013-05-13 11:15:07 +03001363 l2cap_le_start(chan);
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001364 return;
1365 }
1366
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001367 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
1368 l2cap_request_info(conn);
1369 return;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001370 }
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001371
1372 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1373 return;
1374
Marcel Holtmann68d1e282019-06-22 15:47:01 +02001375 if (!l2cap_chan_check_security(chan, true) ||
1376 !__l2cap_no_conn_pending(chan))
1377 return;
1378
1379 if (l2cap_check_enc_key_size(conn->hcon))
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001380 l2cap_start_connection(chan);
Marcel Holtmann68d1e282019-06-22 15:47:01 +02001381 else
1382 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001383}
1384
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001385static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1386{
1387 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03001388 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001389 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1390
1391 switch (mode) {
1392 case L2CAP_MODE_ERTM:
1393 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1394 case L2CAP_MODE_STREAMING:
1395 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1396 default:
1397 return 0x00;
1398 }
1399}
1400
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001401static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001402{
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001403 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001404 struct l2cap_disconn_req req;
1405
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001406 if (!conn)
1407 return;
1408
Andrei Emeltchenkoaad3d0e2012-09-06 15:05:42 +03001409 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001410 __clear_retrans_timer(chan);
1411 __clear_monitor_timer(chan);
1412 __clear_ack_timer(chan);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001413 }
1414
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001415 if (chan->scid == L2CAP_CID_A2MP) {
Gustavo Padovand1177732012-10-06 11:47:38 +01001416 l2cap_state_change(chan, BT_DISCONN);
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001417 return;
1418 }
1419
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001420 req.dcid = cpu_to_le16(chan->dcid);
1421 req.scid = cpu_to_le16(chan->scid);
Gustavo Padovan2d792812012-10-06 10:07:01 +01001422 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1423 sizeof(req), &req);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001424
Gustavo Padovanf8e73012013-10-15 19:24:46 -03001425 l2cap_state_change_and_error(chan, BT_DISCONN, err);
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001426}
1427
Linus Torvalds1da177e2005-04-16 15:20:36 -07001428/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001429static void l2cap_conn_start(struct l2cap_conn *conn)
1430{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001431 struct l2cap_chan *chan, *tmp;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001432
1433 BT_DBG("conn %p", conn);
1434
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001435 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001436
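	/* Walk every channel on this connection: kick off pending outgoing
	 * connection requests (BT_CONNECT) and answer incoming requests
	 * that are still waiting for a response (BT_CONNECT2).
	 */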
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001437 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001438 l2cap_chan_lock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001439
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03001440 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001441 l2cap_chan_ready(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001442 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001443 continue;
1444 }
1445
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03001446 if (chan->state == BT_CONNECT) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03001447 if (!l2cap_chan_check_security(chan, true) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001448 !__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001449 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001450 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02001451 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001452
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001453 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
Gustavo Padovan2d792812012-10-06 10:07:01 +01001454 && test_bit(CONF_STATE2_DEVICE,
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001455 &chan->conf_state)) {
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03001456 l2cap_chan_close(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001457 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001458 continue;
1459 }
1460
Marcel Holtmann68d1e282019-06-22 15:47:01 +02001461 if (l2cap_check_enc_key_size(conn->hcon))
1462 l2cap_start_connection(chan);
1463 else
1464 l2cap_chan_close(chan, ECONNREFUSED);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001465
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03001466 } else if (chan->state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001467 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001468 char buf[128];
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001469 rsp.scid = cpu_to_le16(chan->dcid);
1470 rsp.dcid = cpu_to_le16(chan->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001471
Johan Hedberge7cafc42014-07-17 15:35:38 +03001472 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07001473 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001474 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1475 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08001476 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001477
1478 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02001479 l2cap_state_change(chan, BT_CONFIG);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001480 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1481 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001482 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001483 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001484 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1485 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001486 }
1487
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001488 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001489 sizeof(rsp), &rsp);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001490
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001491 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001492 rsp.result != L2CAP_CR_SUCCESS) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001493 l2cap_chan_unlock(chan);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001494 continue;
1495 }
1496
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001497 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001498 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Ben Seri6300c8b2017-09-09 23:15:59 +02001499 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03001500 chan->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001501 }
1502
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001503 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001504 }
1505
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001506 mutex_unlock(&conn->chan_lock);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001507}
1508
Ville Tervob62f3282011-02-10 22:38:50 -03001509static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1510{
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001511 struct hci_conn *hcon = conn->hcon;
Johan Hedbergdcc36c12014-07-09 12:59:13 +03001512 struct hci_dev *hdev = hcon->hdev;
Ville Tervob62f3282011-02-10 22:38:50 -03001513
Johan Hedberge760ec12014-08-07 22:56:47 +03001514 BT_DBG("%s conn %p", hdev->name, conn);
Ville Tervob62f3282011-02-10 22:38:50 -03001515
Johan Hedberge760ec12014-08-07 22:56:47 +03001516 /* For outgoing pairing which doesn't necessarily have an
1517 * associated socket (e.g. mgmt_pair_device).
1518 */
1519 if (hcon->out)
1520 smp_conn_security(hcon, hcon->pending_sec_level);
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001521
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001522 /* For LE slave connections, make sure the connection interval
1523 * is in the range of the minimum and maximum interval that has
1524 * been configured for this connection. If not, then trigger
1525 * the connection update procedure.
1526 */
Johan Hedberg40bef302014-07-16 11:42:27 +03001527 if (hcon->role == HCI_ROLE_SLAVE &&
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001528 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1529 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1530 struct l2cap_conn_param_update_req req;
1531
1532 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1533 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1534 req.latency = cpu_to_le16(hcon->le_conn_latency);
1535 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1536
1537 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1538 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1539 }
Ville Tervob62f3282011-02-10 22:38:50 -03001540}
1541
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001542static void l2cap_conn_ready(struct l2cap_conn *conn)
1543{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001544 struct l2cap_chan *chan;
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001545 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001546
1547 BT_DBG("conn %p", conn);
1548
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001549 if (hcon->type == ACL_LINK)
1550 l2cap_request_info(conn);
1551
Johan Hedberge760ec12014-08-07 22:56:47 +03001552 mutex_lock(&conn->chan_lock);
1553
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001554 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03001555
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001556 l2cap_chan_lock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001557
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001558 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001559 l2cap_chan_unlock(chan);
1560 continue;
1561 }
1562
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001563 if (hcon->type == LE_LINK) {
Johan Hedbergf1496de2013-05-13 14:15:56 +03001564 l2cap_le_start(chan);
Vinicius Costa Gomes63128452011-06-17 22:46:26 -03001565 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Johan Hedbergaeaeb4b2014-09-10 17:37:46 -07001566 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
1567 l2cap_chan_ready(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001568 } else if (chan->state == BT_CONNECT) {
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001569 l2cap_do_start(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001570 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001571
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001572 l2cap_chan_unlock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001573 }
1574
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001575 mutex_unlock(&conn->chan_lock);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001576
Johan Hedberg79a05722014-08-08 09:28:04 +03001577 if (hcon->type == LE_LINK)
1578 l2cap_le_conn_ready(conn);
1579
Johan Hedberg61a939c2014-01-17 20:45:11 +02001580 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001581}
1582
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001583/* Notify sockets that we cannot guarantee reliability anymore */
1584static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1585{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001586 struct l2cap_chan *chan;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001587
1588 BT_DBG("conn %p", conn);
1589
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001590 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001591
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001592 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenkoecf61bd2011-10-11 14:04:32 +03001593 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
Gustavo Padovan1d8b1fd2012-10-06 11:34:52 +01001594 l2cap_chan_set_err(chan, err);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001595 }
1596
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001597 mutex_unlock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001598}
1599
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001600static void l2cap_info_timeout(struct work_struct *work)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001601{
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001602 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001603 info_timer.work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001604
Marcel Holtmann984947d2009-02-06 23:35:19 +01001605 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001606 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001607
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001608 l2cap_conn_start(conn);
1609}
1610
David Herrmann2c8e1412013-04-06 20:28:45 +02001611/*
1612 * l2cap_user
1613 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1614 * callback is called during registration. The ->remove callback is called
1615 * during unregistration.
1616 * An l2cap_user object is unregistered either explicitly or implicitly when
1617 * the underlying l2cap_conn object is deleted. This guarantees that conn->hcon,
1618 * conn->hchan, etc. are valid as long as the remove callback hasn't been called.
1619 * External modules must own a reference to the l2cap_conn object if they intend
1620 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1621 * any time if they don't.
1622 */
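/* A minimal usage sketch (the callback names below are hypothetical and only
 * illustrate the expected call sequence):
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	user->probe = my_probe;
 *	user->remove = my_remove;
 *	err = l2cap_register_user(conn, user);
 *	...
 *	l2cap_unregister_user(conn, user);
 */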
1623
1624int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1625{
1626 struct hci_dev *hdev = conn->hcon->hdev;
1627 int ret;
1628
1629 /* We need to check whether l2cap_conn is registered. If it is not, we
1630 * must not register the l2cap_user. l2cap_conn_del() unregisters
1631 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1632 * relies on the parent hci_conn object to be locked. This itself relies
1633 * on the hci_dev object to be locked. So we must lock the hci device
1634 * here, too. */
1635
1636 hci_dev_lock(hdev);
1637
Alexey Dobriyan835a6a22015-06-10 20:28:33 +03001638 if (!list_empty(&user->list)) {
David Herrmann2c8e1412013-04-06 20:28:45 +02001639 ret = -EINVAL;
1640 goto out_unlock;
1641 }
1642
1643 /* conn->hchan is NULL after l2cap_conn_del() was called */
1644 if (!conn->hchan) {
1645 ret = -ENODEV;
1646 goto out_unlock;
1647 }
1648
1649 ret = user->probe(conn, user);
1650 if (ret)
1651 goto out_unlock;
1652
1653 list_add(&user->list, &conn->users);
1654 ret = 0;
1655
1656out_unlock:
1657 hci_dev_unlock(hdev);
1658 return ret;
1659}
1660EXPORT_SYMBOL(l2cap_register_user);
1661
1662void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1663{
1664 struct hci_dev *hdev = conn->hcon->hdev;
1665
1666 hci_dev_lock(hdev);
1667
Alexey Dobriyan835a6a22015-06-10 20:28:33 +03001668 if (list_empty(&user->list))
David Herrmann2c8e1412013-04-06 20:28:45 +02001669 goto out_unlock;
1670
Tedd Ho-Jeong Anab944c82015-06-30 11:43:40 -07001671 list_del_init(&user->list);
David Herrmann2c8e1412013-04-06 20:28:45 +02001672 user->remove(conn, user);
1673
1674out_unlock:
1675 hci_dev_unlock(hdev);
1676}
1677EXPORT_SYMBOL(l2cap_unregister_user);
1678
1679static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1680{
1681 struct l2cap_user *user;
1682
1683 while (!list_empty(&conn->users)) {
1684 user = list_first_entry(&conn->users, struct l2cap_user, list);
Tedd Ho-Jeong Anab944c82015-06-30 11:43:40 -07001685 list_del_init(&user->list);
David Herrmann2c8e1412013-04-06 20:28:45 +02001686 user->remove(conn, user);
1687 }
1688}
1689
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001690static void l2cap_conn_del(struct hci_conn *hcon, int err)
1691{
1692 struct l2cap_conn *conn = hcon->l2cap_data;
1693 struct l2cap_chan *chan, *l;
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001694
1695 if (!conn)
1696 return;
1697
1698 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1699
1700 kfree_skb(conn->rx_skb);
1701
Johan Hedberg61a939c2014-01-17 20:45:11 +02001702 skb_queue_purge(&conn->pending_rx);
Jukka Taimisto7ab56c32014-06-12 10:15:13 +00001703
1704 /* We cannot call flush_work(&conn->pending_rx_work) here since we
1705 * might block if we are running on a worker from the same workqueue
1706 * that pending_rx_work is queued on.
1707 */
1708 if (work_pending(&conn->pending_rx_work))
1709 cancel_work_sync(&conn->pending_rx_work);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001710
Johan Hedbergf3d82d02014-09-05 22:19:50 +03001711 if (work_pending(&conn->id_addr_update_work))
1712 cancel_work_sync(&conn->id_addr_update_work);
1713
David Herrmann2c8e1412013-04-06 20:28:45 +02001714 l2cap_unregister_all_users(conn);
1715
Johan Hedberge31fb862014-08-18 20:33:28 +03001716 /* Force the connection to be immediately dropped */
1717 hcon->disc_timeout = 0;
1718
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001719 mutex_lock(&conn->chan_lock);
1720
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001721 /* Kill channels */
1722 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
Mat Martineau61d6ef32012-04-27 16:50:50 -07001723 l2cap_chan_hold(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001724 l2cap_chan_lock(chan);
1725
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001726 l2cap_chan_del(chan, err);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001727
1728 l2cap_chan_unlock(chan);
1729
Gustavo Padovan80b98022012-05-27 22:27:51 -03001730 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07001731 l2cap_chan_put(chan);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001732 }
1733
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001734 mutex_unlock(&conn->chan_lock);
1735
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001736 hci_chan_del(conn->hchan);
1737
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001738 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
Ulisses Furquim127074b2012-01-30 18:26:29 -02001739 cancel_delayed_work_sync(&conn->info_timer);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001740
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001741 hcon->l2cap_data = NULL;
David Herrmann9c903e32013-04-06 20:28:44 +02001742 conn->hchan = NULL;
1743 l2cap_conn_put(conn);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001744}
1745
David Herrmann9c903e32013-04-06 20:28:44 +02001746static void l2cap_conn_free(struct kref *ref)
1747{
1748 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1749
1750 hci_conn_put(conn->hcon);
1751 kfree(conn);
1752}
1753
Johan Hedberg51bb84572014-08-15 21:06:57 +03001754struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
David Herrmann9c903e32013-04-06 20:28:44 +02001755{
1756 kref_get(&conn->ref);
Johan Hedberg51bb84572014-08-15 21:06:57 +03001757 return conn;
David Herrmann9c903e32013-04-06 20:28:44 +02001758}
1759EXPORT_SYMBOL(l2cap_conn_get);
1760
1761void l2cap_conn_put(struct l2cap_conn *conn)
1762{
1763 kref_put(&conn->ref, l2cap_conn_free);
1764}
1765EXPORT_SYMBOL(l2cap_conn_put);
1766
Linus Torvalds1da177e2005-04-16 15:20:36 -07001767/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001768
Ido Yarivc2287682012-04-20 15:46:07 -03001769/* Find channel with psm and source / destination bdaddr.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770 * Returns the closest match.
1771 */
Ido Yarivc2287682012-04-20 15:46:07 -03001772static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1773 bdaddr_t *src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001774 bdaddr_t *dst,
1775 u8 link_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001777 struct l2cap_chan *c, *c1 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001779 read_lock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001780
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001781 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03001782 if (state && c->state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001783 continue;
1784
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001785 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1786 continue;
1787
1788 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1789 continue;
1790
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001791 if (c->psm == psm) {
Ido Yarivc2287682012-04-20 15:46:07 -03001792 int src_match, dst_match;
1793 int src_any, dst_any;
1794
Linus Torvalds1da177e2005-04-16 15:20:36 -07001795 /* Exact match. */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001796 src_match = !bacmp(&c->src, src);
1797 dst_match = !bacmp(&c->dst, dst);
Ido Yarivc2287682012-04-20 15:46:07 -03001798 if (src_match && dst_match) {
Johan Hedberga24cce12014-08-07 22:56:42 +03001799 l2cap_chan_hold(c);
Johannes Berga7567b22011-06-01 08:29:54 +02001800 read_unlock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001801 return c;
1802 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803
1804 /* Closest match */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001805 src_any = !bacmp(&c->src, BDADDR_ANY);
1806 dst_any = !bacmp(&c->dst, BDADDR_ANY);
Ido Yarivc2287682012-04-20 15:46:07 -03001807 if ((src_match && dst_any) || (src_any && dst_match) ||
1808 (src_any && dst_any))
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001809 c1 = c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810 }
1811 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001812
Johan Hedberga24cce12014-08-07 22:56:42 +03001813 if (c1)
1814 l2cap_chan_hold(c1);
1815
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001816 read_unlock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001817
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001818 return c1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001819}
1820
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001821static void l2cap_monitor_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001822{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001823 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001824 monitor_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001825
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03001826 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001827
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001828 l2cap_chan_lock(chan);
1829
Mat Martineau80909e02012-05-17 20:53:50 -07001830 if (!chan->conn) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001831 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001832 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001833 return;
1834 }
1835
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001836 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001837
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001838 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001839 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001840}
1841
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001842static void l2cap_retrans_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001843{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001844 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001845 retrans_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001846
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03001847 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001848
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001849 l2cap_chan_lock(chan);
1850
Mat Martineau80909e02012-05-17 20:53:50 -07001851 if (!chan->conn) {
1852 l2cap_chan_unlock(chan);
1853 l2cap_chan_put(chan);
1854 return;
1855 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001856
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001857 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001858 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001859 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001860}
1861
Gustavo Padovand6603662012-05-21 13:58:22 -03001862static void l2cap_streaming_send(struct l2cap_chan *chan,
1863 struct sk_buff_head *skbs)
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001864{
Gustavo F. Padovanccbb84a2010-08-30 18:44:44 -03001865 struct sk_buff *skb;
Mat Martineau37339372012-05-17 20:53:33 -07001866 struct l2cap_ctrl *control;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001867
Mat Martineau37339372012-05-17 20:53:33 -07001868 BT_DBG("chan %p, skbs %p", chan, skbs);
1869
Mat Martineaub99e13a2012-10-23 15:24:19 -07001870 if (__chan_is_moving(chan))
1871 return;
1872
Mat Martineau37339372012-05-17 20:53:33 -07001873 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1874
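	/* Streaming mode: frames are transmitted immediately and never
	 * retransmitted, so the tx queue is drained as frames are sent.
	 */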
1875 while (!skb_queue_empty(&chan->tx_q)) {
1876
1877 skb = skb_dequeue(&chan->tx_q);
1878
Johan Hedberga4368ff2015-03-30 23:21:01 +03001879 bt_cb(skb)->l2cap.retries = 1;
1880 control = &bt_cb(skb)->l2cap;
Mat Martineau37339372012-05-17 20:53:33 -07001881
1882 control->reqseq = 0;
1883 control->txseq = chan->next_tx_seq;
1884
1885 __pack_control(chan, control, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001886
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001887 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau37339372012-05-17 20:53:33 -07001888 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1889 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001890 }
1891
Gustavo F. Padovan43434782011-04-12 18:31:57 -03001892 l2cap_do_send(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001893
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001894 BT_DBG("Sent txseq %u", control->txseq);
Mat Martineau37339372012-05-17 20:53:33 -07001895
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001896 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau37339372012-05-17 20:53:33 -07001897 chan->frames_sent++;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001898 }
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001899}
1900
Szymon Janc67c9e842011-07-28 16:24:33 +02001901static int l2cap_ertm_send(struct l2cap_chan *chan)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001902{
1903 struct sk_buff *skb, *tx_skb;
Mat Martineau18a48e72012-05-17 20:53:34 -07001904 struct l2cap_ctrl *control;
1905 int sent = 0;
1906
1907 BT_DBG("chan %p", chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001908
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03001909 if (chan->state != BT_CONNECTED)
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001910 return -ENOTCONN;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001911
Mat Martineau94122bb2012-05-02 09:42:02 -07001912 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1913 return 0;
1914
Mat Martineaub99e13a2012-10-23 15:24:19 -07001915 if (__chan_is_moving(chan))
1916 return 0;
1917
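	/* Transmit new I-frames as long as data is queued, the remote
	 * TX window has room and the transmit state machine permits it.
	 */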
Mat Martineau18a48e72012-05-17 20:53:34 -07001918 while (chan->tx_send_head &&
1919 chan->unacked_frames < chan->remote_tx_win &&
1920 chan->tx_state == L2CAP_TX_STATE_XMIT) {
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001921
Mat Martineau18a48e72012-05-17 20:53:34 -07001922 skb = chan->tx_send_head;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001923
Johan Hedberga4368ff2015-03-30 23:21:01 +03001924 bt_cb(skb)->l2cap.retries = 1;
1925 control = &bt_cb(skb)->l2cap;
Gustavo F. Padovan95ffa972010-06-18 20:37:33 -03001926
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001927 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
Mat Martineau18a48e72012-05-17 20:53:34 -07001928 control->final = 1;
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001929
Mat Martineau18a48e72012-05-17 20:53:34 -07001930 control->reqseq = chan->buffer_seq;
1931 chan->last_acked_seq = chan->buffer_seq;
1932 control->txseq = chan->next_tx_seq;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001933
Mat Martineau18a48e72012-05-17 20:53:34 -07001934 __pack_control(chan, control, skb);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001935
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001936 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau18a48e72012-05-17 20:53:34 -07001937 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1938 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001939 }
1940
Mat Martineau18a48e72012-05-17 20:53:34 -07001941 /* Clone after data has been modified. Data is assumed to be
1942 read-only (for locking purposes) on cloned sk_buffs.
1943 */
1944 tx_skb = skb_clone(skb, GFP_KERNEL);
1945
1946 if (!tx_skb)
1947 break;
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -03001948
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001949 __set_retrans_timer(chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001950
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001951 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau18a48e72012-05-17 20:53:34 -07001952 chan->unacked_frames++;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03001953 chan->frames_sent++;
Mat Martineau18a48e72012-05-17 20:53:34 -07001954 sent++;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001955
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001956 if (skb_queue_is_last(&chan->tx_q, skb))
1957 chan->tx_send_head = NULL;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001958 else
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001959 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
Mat Martineau18a48e72012-05-17 20:53:34 -07001960
1961 l2cap_do_send(chan, tx_skb);
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001962 BT_DBG("Sent txseq %u", control->txseq);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001963 }
1964
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001965 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1966 chan->unacked_frames, skb_queue_len(&chan->tx_q));
Mat Martineau18a48e72012-05-17 20:53:34 -07001967
1968 return sent;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001969}
1970
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001971static void l2cap_ertm_resend(struct l2cap_chan *chan)
1972{
1973 struct l2cap_ctrl control;
1974 struct sk_buff *skb;
1975 struct sk_buff *tx_skb;
1976 u16 seq;
1977
1978 BT_DBG("chan %p", chan);
1979
1980 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1981 return;
1982
Mat Martineaub99e13a2012-10-23 15:24:19 -07001983 if (__chan_is_moving(chan))
1984 return;
1985
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001986 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1987 seq = l2cap_seq_list_pop(&chan->retrans_list);
1988
1989 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1990 if (!skb) {
1991 BT_DBG("Error: Can't retransmit seq %d, frame missing",
Gustavo Padovan2d792812012-10-06 10:07:01 +01001992 seq);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001993 continue;
1994 }
1995
Johan Hedberga4368ff2015-03-30 23:21:01 +03001996 bt_cb(skb)->l2cap.retries++;
1997 control = bt_cb(skb)->l2cap;
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001998
1999 if (chan->max_tx != 0 &&
Johan Hedberga4368ff2015-03-30 23:21:01 +03002000 bt_cb(skb)->l2cap.retries > chan->max_tx) {
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002001 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02002002 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002003 l2cap_seq_list_clear(&chan->retrans_list);
2004 break;
2005 }
2006
2007 control.reqseq = chan->buffer_seq;
2008 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
2009 control.final = 1;
2010 else
2011 control.final = 0;
2012
2013 if (skb_cloned(skb)) {
2014 /* Cloned sk_buffs are read-only, so we need a
2015 * writeable copy
2016 */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002017 tx_skb = skb_copy(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002018 } else {
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002019 tx_skb = skb_clone(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002020 }
2021
2022 if (!tx_skb) {
2023 l2cap_seq_list_clear(&chan->retrans_list);
2024 break;
2025 }
2026
2027 /* Update skb contents */
2028 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
2029 put_unaligned_le32(__pack_extended_control(&control),
2030 tx_skb->data + L2CAP_HDR_SIZE);
2031 } else {
2032 put_unaligned_le16(__pack_enhanced_control(&control),
2033 tx_skb->data + L2CAP_HDR_SIZE);
2034 }
2035
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02002036 /* Update FCS */
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002037 if (chan->fcs == L2CAP_FCS_CRC16) {
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02002038 u16 fcs = crc16(0, (u8 *) tx_skb->data,
2039 tx_skb->len - L2CAP_FCS_SIZE);
2040 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
2041 L2CAP_FCS_SIZE);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002042 }
2043
2044 l2cap_do_send(chan, tx_skb);
2045
2046 BT_DBG("Resent txseq %d", control.txseq);
2047
2048 chan->last_acked_seq = chan->buffer_seq;
2049 }
2050}
2051
Mat Martineauf80842a2012-05-17 20:53:46 -07002052static void l2cap_retransmit(struct l2cap_chan *chan,
2053 struct l2cap_ctrl *control)
2054{
2055 BT_DBG("chan %p, control %p", chan, control);
2056
2057 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
2058 l2cap_ertm_resend(chan);
2059}
2060
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002061static void l2cap_retransmit_all(struct l2cap_chan *chan,
2062 struct l2cap_ctrl *control)
2063{
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002064 struct sk_buff *skb;
2065
2066 BT_DBG("chan %p, control %p", chan, control);
2067
2068 if (control->poll)
2069 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2070
2071 l2cap_seq_list_clear(&chan->retrans_list);
2072
2073 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2074 return;
2075
2076 if (chan->unacked_frames) {
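		/* Skip frames the peer has already acknowledged (txseq below
		 * reqseq), then queue every remaining unacked frame for
		 * retransmission.
		 */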
2077 skb_queue_walk(&chan->tx_q, skb) {
Johan Hedberga4368ff2015-03-30 23:21:01 +03002078 if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01002079 skb == chan->tx_send_head)
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002080 break;
2081 }
2082
2083 skb_queue_walk_from(&chan->tx_q, skb) {
2084 if (skb == chan->tx_send_head)
2085 break;
2086
2087 l2cap_seq_list_append(&chan->retrans_list,
Johan Hedberga4368ff2015-03-30 23:21:01 +03002088 bt_cb(skb)->l2cap.txseq);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002089 }
2090
2091 l2cap_ertm_resend(chan);
2092 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002093}
2094
Szymon Jancb17e73b2012-01-11 10:59:47 +01002095static void l2cap_send_ack(struct l2cap_chan *chan)
2096{
Mat Martineau0a0aba42012-05-17 20:53:39 -07002097 struct l2cap_ctrl control;
2098 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2099 chan->last_acked_seq);
2100 int threshold;
2101
2102 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2103 chan, chan->last_acked_seq, chan->buffer_seq);
2104
2105 memset(&control, 0, sizeof(control));
2106 control.sframe = 1;
2107
2108 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2109 chan->rx_state == L2CAP_RX_STATE_RECV) {
2110 __clear_ack_timer(chan);
2111 control.super = L2CAP_SUPER_RNR;
2112 control.reqseq = chan->buffer_seq;
2113 l2cap_send_sframe(chan, &control);
2114 } else {
2115 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2116 l2cap_ertm_send(chan);
2117 /* If any i-frames were sent, they included an ack */
2118 if (chan->buffer_seq == chan->last_acked_seq)
2119 frames_to_ack = 0;
2120 }
2121
Mat Martineauc20f8e32012-07-10 05:47:07 -07002122 /* Ack now if the window is 3/4ths full.
Mat Martineau0a0aba42012-05-17 20:53:39 -07002123 * Calculate without mul or div
2124 */
Mat Martineauc20f8e32012-07-10 05:47:07 -07002125 threshold = chan->ack_win;
Mat Martineau0a0aba42012-05-17 20:53:39 -07002126 threshold += threshold << 1;
2127 threshold >>= 2;
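		/* threshold is now (3 * ack_win) / 4, computed with a shift
		 * and an add only.
		 */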
2128
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002129 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
Mat Martineau0a0aba42012-05-17 20:53:39 -07002130 threshold);
2131
2132 if (frames_to_ack >= threshold) {
2133 __clear_ack_timer(chan);
2134 control.super = L2CAP_SUPER_RR;
2135 control.reqseq = chan->buffer_seq;
2136 l2cap_send_sframe(chan, &control);
2137 frames_to_ack = 0;
2138 }
2139
2140 if (frames_to_ack)
2141 __set_ack_timer(chan);
2142 }
Szymon Jancb17e73b2012-01-11 10:59:47 +01002143}
2144
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002145static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2146 struct msghdr *msg, int len,
2147 int count, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002148{
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002149 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002150 struct sk_buff **frag;
Gustavo Padovan90338942012-04-06 20:15:47 -03002151 int sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002152
Al Viro17836392014-11-24 17:07:38 -05002153 if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002154 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002155
2156 sent += count;
2157 len -= count;
2158
2159 /* Continuation fragments (no L2CAP header) */
2160 frag = &skb_shinfo(skb)->frag_list;
2161 while (len) {
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002162 struct sk_buff *tmp;
2163
Linus Torvalds1da177e2005-04-16 15:20:36 -07002164 count = min_t(unsigned int, conn->mtu, len);
2165
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002166 tmp = chan->ops->alloc_skb(chan, 0, count,
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002167 msg->msg_flags & MSG_DONTWAIT);
2168 if (IS_ERR(tmp))
2169 return PTR_ERR(tmp);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002170
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002171 *frag = tmp;
2172
Al Viro17836392014-11-24 17:07:38 -05002173 if (copy_from_iter(skb_put(*frag, count), count,
2174 &msg->msg_iter) != count)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002175 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002176
2177 sent += count;
2178 len -= count;
2179
Gustavo Padovan2d0ed3d2012-05-11 13:16:12 -03002180 skb->len += (*frag)->len;
2181 skb->data_len += (*frag)->len;
2182
Linus Torvalds1da177e2005-04-16 15:20:36 -07002183 frag = &(*frag)->next;
2184 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002185
2186 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002187}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002188
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002189static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002190 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002191{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002192 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002193 struct sk_buff *skb;
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002194 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002195 struct l2cap_hdr *lh;
2196
Marcel Holtmann8d463212014-06-05 15:22:51 +02002197 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2198 __le16_to_cpu(chan->psm), len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002199
2200 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002201
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002202 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002203 msg->msg_flags & MSG_DONTWAIT);
2204 if (IS_ERR(skb))
2205 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002206
2207 /* Create L2CAP header */
2208 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002209 lh->cid = cpu_to_le16(chan->dcid);
Andrei Emeltchenkodaf6a78c2012-05-03 10:55:52 +03002210 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
Marcel Holtmann43b1b8d2013-10-12 06:01:26 -07002211 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002212
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002213 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002214 if (unlikely(err < 0)) {
2215 kfree_skb(skb);
2216 return ERR_PTR(err);
2217 }
2218 return skb;
2219}
2220
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002221static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002222 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002223{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002224 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002225 struct sk_buff *skb;
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002226 int err, count;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002227 struct l2cap_hdr *lh;
2228
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002229 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002230
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002231 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002232
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002233 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002234 msg->msg_flags & MSG_DONTWAIT);
2235 if (IS_ERR(skb))
2236 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002237
2238 /* Create L2CAP header */
2239 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002240 lh->cid = cpu_to_le16(chan->dcid);
Gustavo Padovan6ff9b5e2012-05-02 11:56:17 -03002241 lh->len = cpu_to_le16(len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002242
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002243 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002244 if (unlikely(err < 0)) {
2245 kfree_skb(skb);
2246 return ERR_PTR(err);
2247 }
2248 return skb;
2249}
2250
Luiz Augusto von Dentzab0ff762011-09-12 20:00:50 +03002251static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002252 struct msghdr *msg, size_t len,
2253 u16 sdulen)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002254{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002255 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002256 struct sk_buff *skb;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002257 int err, count, hlen;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002258 struct l2cap_hdr *lh;
2259
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002260 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002261
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03002262 if (!conn)
2263 return ERR_PTR(-ENOTCONN);
2264
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002265 hlen = __ertm_hdr_size(chan);
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002266
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002267 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002268 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002269
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03002270 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002271 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03002272
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002273 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002274
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002275 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002276 msg->msg_flags & MSG_DONTWAIT);
2277 if (IS_ERR(skb))
2278 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002279
2280 /* Create L2CAP header */
2281 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002282 lh->cid = cpu_to_le16(chan->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002283 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002284
Mat Martineau18a48e72012-05-17 20:53:34 -07002285 /* Control header is populated later */
2286 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2287 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2288 else
2289 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002290
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002291 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002292 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002293
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002294 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002295 if (unlikely(err < 0)) {
2296 kfree_skb(skb);
2297 return ERR_PTR(err);
2298 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03002299
Johan Hedberga4368ff2015-03-30 23:21:01 +03002300 bt_cb(skb)->l2cap.fcs = chan->fcs;
2301 bt_cb(skb)->l2cap.retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002302 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002303}
2304
Mat Martineau94122bb2012-05-02 09:42:02 -07002305static int l2cap_segment_sdu(struct l2cap_chan *chan,
2306 struct sk_buff_head *seg_queue,
2307 struct msghdr *msg, size_t len)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002308{
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002309 struct sk_buff *skb;
Mat Martineau94122bb2012-05-02 09:42:02 -07002310 u16 sdu_len;
2311 size_t pdu_len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002312 u8 sar;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002313
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002314 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002315
Mat Martineau94122bb2012-05-02 09:42:02 -07002316 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2317 * so fragmented skbs are not used. The HCI layer's handling
2318 * of fragmented skbs is not compatible with ERTM's queueing.
2319 */
2320
2321 /* PDU size is derived from the HCI MTU */
2322 pdu_len = chan->conn->mtu;
2323
Mat Martineaua5495742012-10-23 15:24:21 -07002324 /* Constrain PDU size for BR/EDR connections */
2325 if (!chan->hs_hcon)
2326 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
Mat Martineau94122bb2012-05-02 09:42:02 -07002327
2328 /* Adjust for largest possible L2CAP overhead. */
Gustavo Padovan35d401d2012-05-25 18:57:05 -03002329 if (chan->fcs)
2330 pdu_len -= L2CAP_FCS_SIZE;
2331
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002332 pdu_len -= __ertm_hdr_size(chan);
Mat Martineau94122bb2012-05-02 09:42:02 -07002333
2334 /* Remote device may have requested smaller PDUs */
2335 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2336
2337 if (len <= pdu_len) {
2338 sar = L2CAP_SAR_UNSEGMENTED;
2339 sdu_len = 0;
2340 pdu_len = len;
2341 } else {
2342 sar = L2CAP_SAR_START;
2343 sdu_len = len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002344 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002345
2346 while (len > 0) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002347 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002348
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002349 if (IS_ERR(skb)) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002350 __skb_queue_purge(seg_queue);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002351 return PTR_ERR(skb);
2352 }
2353
Johan Hedberga4368ff2015-03-30 23:21:01 +03002354 bt_cb(skb)->l2cap.sar = sar;
Mat Martineau94122bb2012-05-02 09:42:02 -07002355 __skb_queue_tail(seg_queue, skb);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002356
Mat Martineau94122bb2012-05-02 09:42:02 -07002357 len -= pdu_len;
Lukasz Rymanowski069cb272014-08-13 16:01:41 +02002358 if (sdu_len)
Mat Martineau94122bb2012-05-02 09:42:02 -07002359 sdu_len = 0;
Mat Martineau94122bb2012-05-02 09:42:02 -07002360
2361 if (len <= pdu_len) {
2362 sar = L2CAP_SAR_END;
2363 pdu_len = len;
2364 } else {
2365 sar = L2CAP_SAR_CONTINUE;
2366 }
2367 }
2368
Gustavo Padovanf0f62792012-05-29 13:29:17 -03002369 return 0;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002370}
2371
Johan Hedberg177f8f22013-05-31 17:54:51 +03002372static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2373 struct msghdr *msg,
2374 size_t len, u16 sdulen)
2375{
2376 struct l2cap_conn *conn = chan->conn;
2377 struct sk_buff *skb;
2378 int err, count, hlen;
2379 struct l2cap_hdr *lh;
2380
2381 BT_DBG("chan %p len %zu", chan, len);
2382
2383 if (!conn)
2384 return ERR_PTR(-ENOTCONN);
2385
2386 hlen = L2CAP_HDR_SIZE;
2387
2388 if (sdulen)
2389 hlen += L2CAP_SDULEN_SIZE;
2390
2391 count = min_t(unsigned int, (conn->mtu - hlen), len);
2392
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002393 skb = chan->ops->alloc_skb(chan, hlen, count,
Johan Hedberg177f8f22013-05-31 17:54:51 +03002394 msg->msg_flags & MSG_DONTWAIT);
2395 if (IS_ERR(skb))
2396 return skb;
2397
2398 /* Create L2CAP header */
2399 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2400 lh->cid = cpu_to_le16(chan->dcid);
2401 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2402
2403 if (sdulen)
2404 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2405
2406 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2407 if (unlikely(err < 0)) {
2408 kfree_skb(skb);
2409 return ERR_PTR(err);
2410 }
2411
2412 return skb;
2413}
2414
2415static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2416 struct sk_buff_head *seg_queue,
2417 struct msghdr *msg, size_t len)
2418{
2419 struct sk_buff *skb;
2420 size_t pdu_len;
2421 u16 sdu_len;
2422
2423 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2424
Johan Hedberg177f8f22013-05-31 17:54:51 +03002425 sdu_len = len;
Johan Hedberg72c6fb92014-08-15 21:06:51 +03002426 pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;
Johan Hedberg177f8f22013-05-31 17:54:51 +03002427
2428 while (len > 0) {
2429 if (len <= pdu_len)
2430 pdu_len = len;
2431
2432 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2433 if (IS_ERR(skb)) {
2434 __skb_queue_purge(seg_queue);
2435 return PTR_ERR(skb);
2436 }
2437
2438 __skb_queue_tail(seg_queue, skb);
2439
2440 len -= pdu_len;
2441
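		/* The SDU length field is only present in the first PDU; once
		 * it has been sent, later PDUs can carry L2CAP_SDULEN_SIZE
		 * more payload bytes.
		 */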
2442 if (sdu_len) {
2443 sdu_len = 0;
2444 pdu_len += L2CAP_SDULEN_SIZE;
2445 }
2446 }
2447
2448 return 0;
2449}
2450
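/* Send an SDU on a channel. Connectionless channels get one PDU; LE
 * flow control channels are segmented and sent while tx credits last;
 * basic mode sends a single basic PDU; ERTM and streaming modes segment
 * first and then hand the queue to the transmit state machine or the
 * streaming send path.
 */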
Marcel Holtmann8d463212014-06-05 15:22:51 +02002451int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002452{
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002453 struct sk_buff *skb;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002454 int err;
Mat Martineau94122bb2012-05-02 09:42:02 -07002455 struct sk_buff_head seg_queue;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002456
Seung-Woo Kim31e8ce82013-11-05 18:46:33 +09002457 if (!chan->conn)
2458 return -ENOTCONN;
2459
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002460 /* Connectionless channel */
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002461 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
Marcel Holtmann8d463212014-06-05 15:22:51 +02002462 skb = l2cap_create_connless_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002463 if (IS_ERR(skb))
2464 return PTR_ERR(skb);
2465
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002466 /* Channel lock is released before requesting new skb and then
2467 * reacquired, so we need to recheck the channel state.
2468 */
2469 if (chan->state != BT_CONNECTED) {
2470 kfree_skb(skb);
2471 return -ENOTCONN;
2472 }
2473
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002474 l2cap_do_send(chan, skb);
2475 return len;
2476 }
2477
2478 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03002479 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedberg177f8f22013-05-31 17:54:51 +03002480 /* Check outgoing MTU */
2481 if (len > chan->omtu)
2482 return -EMSGSIZE;
2483
Johan Hedbergfad5fc892013-12-05 09:45:01 +02002484 if (!chan->tx_credits)
2485 return -EAGAIN;
2486
Johan Hedberg177f8f22013-05-31 17:54:51 +03002487 __skb_queue_head_init(&seg_queue);
2488
2489 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2490
2491 if (chan->state != BT_CONNECTED) {
2492 __skb_queue_purge(&seg_queue);
2493 err = -ENOTCONN;
2494 }
2495
2496 if (err)
2497 return err;
2498
2499 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2500
2501 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2502 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2503 chan->tx_credits--;
2504 }
2505
2506 if (!chan->tx_credits)
2507 chan->ops->suspend(chan);
2508
2509 err = len;
2510
2511 break;
2512
Johan Hedbergfad5fc892013-12-05 09:45:01 +02002513 case L2CAP_MODE_BASIC:
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002514 /* Check outgoing MTU */
2515 if (len > chan->omtu)
2516 return -EMSGSIZE;
2517
2518 /* Create a basic PDU */
Marcel Holtmann8d463212014-06-05 15:22:51 +02002519 skb = l2cap_create_basic_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002520 if (IS_ERR(skb))
2521 return PTR_ERR(skb);
2522
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002523 /* Channel lock is released before requesting new skb and then
2524 * reacquired, so we need to recheck the channel state.
2525 */
2526 if (chan->state != BT_CONNECTED) {
2527 kfree_skb(skb);
2528 return -ENOTCONN;
2529 }
2530
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002531 l2cap_do_send(chan, skb);
2532 err = len;
2533 break;
2534
2535 case L2CAP_MODE_ERTM:
2536 case L2CAP_MODE_STREAMING:
Mat Martineau94122bb2012-05-02 09:42:02 -07002537 /* Check outgoing MTU */
2538 if (len > chan->omtu) {
2539 err = -EMSGSIZE;
2540 break;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002541 }
2542
Mat Martineau94122bb2012-05-02 09:42:02 -07002543 __skb_queue_head_init(&seg_queue);
2544
2545 /* Do segmentation before calling in to the state machine,
2546 * since it's possible to block while waiting for memory
2547 * allocation.
2548 */
2549 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2550
2551 /* The channel could have been closed while segmenting;
2552 * check that it is still connected.
2553 */
2554 if (chan->state != BT_CONNECTED) {
2555 __skb_queue_purge(&seg_queue);
2556 err = -ENOTCONN;
2557 }
2558
2559 if (err)
2560 break;
2561
Mat Martineau37339372012-05-17 20:53:33 -07002562 if (chan->mode == L2CAP_MODE_ERTM)
Gustavo Padovand6603662012-05-21 13:58:22 -03002563 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
Mat Martineau37339372012-05-17 20:53:33 -07002564 else
Gustavo Padovand6603662012-05-21 13:58:22 -03002565 l2cap_streaming_send(chan, &seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002566
Gustavo Padovand6603662012-05-21 13:58:22 -03002567 err = len;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002568
Mat Martineau94122bb2012-05-02 09:42:02 -07002569 /* If the skbs were not queued for sending, they'll still be in
2570 * seg_queue and need to be purged.
2571 */
2572 __skb_queue_purge(&seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002573 break;
2574
2575 default:
2576 BT_DBG("bad state %1.1x", chan->mode);
2577 err = -EBADFD;
2578 }
2579
2580 return err;
2581}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03002582EXPORT_SYMBOL_GPL(l2cap_chan_send);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002583
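/* Send an SREJ for every sequence number between expected_tx_seq and
 * txseq that is not already buffered in srej_q, and remember each
 * requested frame in srej_list.
 */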
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002584static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2585{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002586 struct l2cap_ctrl control;
2587 u16 seq;
2588
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002589 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002590
2591 memset(&control, 0, sizeof(control));
2592 control.sframe = 1;
2593 control.super = L2CAP_SUPER_SREJ;
2594
2595 for (seq = chan->expected_tx_seq; seq != txseq;
2596 seq = __next_seq(chan, seq)) {
2597 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2598 control.reqseq = seq;
2599 l2cap_send_sframe(chan, &control);
2600 l2cap_seq_list_append(&chan->srej_list, seq);
2601 }
2602 }
2603
2604 chan->expected_tx_seq = __next_seq(chan, txseq);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002605}
2606
2607static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2608{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002609 struct l2cap_ctrl control;
2610
2611 BT_DBG("chan %p", chan);
2612
2613 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2614 return;
2615
2616 memset(&control, 0, sizeof(control));
2617 control.sframe = 1;
2618 control.super = L2CAP_SUPER_SREJ;
2619 control.reqseq = chan->srej_list.tail;
2620 l2cap_send_sframe(chan, &control);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002621}
2622
2623static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2624{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002625 struct l2cap_ctrl control;
2626 u16 initial_head;
2627 u16 seq;
2628
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002629 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002630
2631 memset(&control, 0, sizeof(control));
2632 control.sframe = 1;
2633 control.super = L2CAP_SUPER_SREJ;
2634
2635 /* Capture initial list head to allow only one pass through the list. */
2636 initial_head = chan->srej_list.head;
2637
2638 do {
2639 seq = l2cap_seq_list_pop(&chan->srej_list);
2640 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2641 break;
2642
2643 control.reqseq = seq;
2644 l2cap_send_sframe(chan, &control);
2645 l2cap_seq_list_append(&chan->srej_list, seq);
2646 } while (chan->srej_list.head != initial_head);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002647}
2648
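/* Handle the reqseq (acknowledgement) field of a received frame: free
 * every acknowledged I-frame from the tx queue and stop the
 * retransmission timer once nothing is left unacked.
 */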
Mat Martineau608bcc62012-05-17 20:53:32 -07002649static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2650{
2651 struct sk_buff *acked_skb;
2652 u16 ackseq;
2653
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002654 BT_DBG("chan %p, reqseq %u", chan, reqseq);
Mat Martineau608bcc62012-05-17 20:53:32 -07002655
2656 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2657 return;
2658
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002659 BT_DBG("expected_ack_seq %u, unacked_frames %u",
Mat Martineau608bcc62012-05-17 20:53:32 -07002660 chan->expected_ack_seq, chan->unacked_frames);
2661
2662 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2663 ackseq = __next_seq(chan, ackseq)) {
2664
2665 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2666 if (acked_skb) {
2667 skb_unlink(acked_skb, &chan->tx_q);
2668 kfree_skb(acked_skb);
2669 chan->unacked_frames--;
2670 }
2671 }
2672
2673 chan->expected_ack_seq = reqseq;
2674
2675 if (chan->unacked_frames == 0)
2676 __clear_retrans_timer(chan);
2677
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002678 BT_DBG("unacked_frames %u", chan->unacked_frames);
Mat Martineau608bcc62012-05-17 20:53:32 -07002679}
2680
2681static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2682{
2683 BT_DBG("chan %p", chan);
2684
2685 chan->expected_tx_seq = chan->buffer_seq;
2686 l2cap_seq_list_clear(&chan->srej_list);
2687 skb_queue_purge(&chan->srej_q);
2688 chan->rx_state = L2CAP_RX_STATE_RECV;
2689}
2690
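/* Transmit state machine handler for the XMIT state: new data is queued
 * and sent immediately, local busy is entered and cleared,
 * acknowledgements are processed, and polls move the channel to WAIT_F.
 */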
Gustavo Padovand6603662012-05-21 13:58:22 -03002691static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2692 struct l2cap_ctrl *control,
2693 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002694{
Mat Martineau608bcc62012-05-17 20:53:32 -07002695 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2696 event);
2697
2698 switch (event) {
2699 case L2CAP_EV_DATA_REQUEST:
2700 if (chan->tx_send_head == NULL)
2701 chan->tx_send_head = skb_peek(skbs);
2702
2703 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2704 l2cap_ertm_send(chan);
2705 break;
2706 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2707 BT_DBG("Enter LOCAL_BUSY");
2708 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2709
2710 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2711 /* The SREJ_SENT state must be aborted if we are to
2712 * enter the LOCAL_BUSY state.
2713 */
2714 l2cap_abort_rx_srej_sent(chan);
2715 }
2716
2717 l2cap_send_ack(chan);
2718
2719 break;
2720 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2721 BT_DBG("Exit LOCAL_BUSY");
2722 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2723
2724 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2725 struct l2cap_ctrl local_control;
2726
2727 memset(&local_control, 0, sizeof(local_control));
2728 local_control.sframe = 1;
2729 local_control.super = L2CAP_SUPER_RR;
2730 local_control.poll = 1;
2731 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002732 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002733
2734 chan->retry_count = 1;
2735 __set_monitor_timer(chan);
2736 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2737 }
2738 break;
2739 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2740 l2cap_process_reqseq(chan, control->reqseq);
2741 break;
2742 case L2CAP_EV_EXPLICIT_POLL:
2743 l2cap_send_rr_or_rnr(chan, 1);
2744 chan->retry_count = 1;
2745 __set_monitor_timer(chan);
2746 __clear_ack_timer(chan);
2747 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2748 break;
2749 case L2CAP_EV_RETRANS_TO:
2750 l2cap_send_rr_or_rnr(chan, 1);
2751 chan->retry_count = 1;
2752 __set_monitor_timer(chan);
2753 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2754 break;
2755 case L2CAP_EV_RECV_FBIT:
2756 /* Nothing to process */
2757 break;
2758 default:
2759 break;
2760 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002761}
2762
Gustavo Padovand6603662012-05-21 13:58:22 -03002763static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2764 struct l2cap_ctrl *control,
2765 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002766{
Mat Martineau608bcc62012-05-17 20:53:32 -07002767 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2768 event);
2769
2770 switch (event) {
2771 case L2CAP_EV_DATA_REQUEST:
2772 if (chan->tx_send_head == NULL)
2773 chan->tx_send_head = skb_peek(skbs);
2774 /* Queue data, but don't send. */
2775 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2776 break;
2777 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2778 BT_DBG("Enter LOCAL_BUSY");
2779 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2780
2781 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2782 /* The SREJ_SENT state must be aborted if we are to
2783 * enter the LOCAL_BUSY state.
2784 */
2785 l2cap_abort_rx_srej_sent(chan);
2786 }
2787
2788 l2cap_send_ack(chan);
2789
2790 break;
2791 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2792 BT_DBG("Exit LOCAL_BUSY");
2793 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2794
2795 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2796 struct l2cap_ctrl local_control;
2797 memset(&local_control, 0, sizeof(local_control));
2798 local_control.sframe = 1;
2799 local_control.super = L2CAP_SUPER_RR;
2800 local_control.poll = 1;
2801 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002802 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002803
2804 chan->retry_count = 1;
2805 __set_monitor_timer(chan);
2806 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2807 }
2808 break;
2809 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2810 l2cap_process_reqseq(chan, control->reqseq);
2811
2812 /* Fall through */
2813
2814 case L2CAP_EV_RECV_FBIT:
2815 if (control && control->final) {
2816 __clear_monitor_timer(chan);
2817 if (chan->unacked_frames > 0)
2818 __set_retrans_timer(chan);
2819 chan->retry_count = 0;
2820 chan->tx_state = L2CAP_TX_STATE_XMIT;
2821 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2822 }
2823 break;
2824 case L2CAP_EV_EXPLICIT_POLL:
2825 /* Ignore */
2826 break;
2827 case L2CAP_EV_MONITOR_TO:
2828 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2829 l2cap_send_rr_or_rnr(chan, 1);
2830 __set_monitor_timer(chan);
2831 chan->retry_count++;
2832 } else {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02002833 l2cap_send_disconn_req(chan, ECONNABORTED);
Mat Martineau608bcc62012-05-17 20:53:32 -07002834 }
2835 break;
2836 default:
2837 break;
2838 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002839}
2840
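/* Dispatch a transmit state machine event according to the current
 * tx_state.
 */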
Gustavo Padovand6603662012-05-21 13:58:22 -03002841static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2842 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002843{
Mat Martineau608bcc62012-05-17 20:53:32 -07002844 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2845 chan, control, skbs, event, chan->tx_state);
2846
2847 switch (chan->tx_state) {
2848 case L2CAP_TX_STATE_XMIT:
Gustavo Padovand6603662012-05-21 13:58:22 -03002849 l2cap_tx_state_xmit(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002850 break;
2851 case L2CAP_TX_STATE_WAIT_F:
Gustavo Padovand6603662012-05-21 13:58:22 -03002852 l2cap_tx_state_wait_f(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002853 break;
2854 default:
2855 /* Ignore event */
2856 break;
2857 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002858}
2859
Mat Martineau4b51dae92012-05-17 20:53:37 -07002860static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2861 struct l2cap_ctrl *control)
2862{
2863 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002864 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
Mat Martineau4b51dae92012-05-17 20:53:37 -07002865}
2866
Mat Martineauf80842a2012-05-17 20:53:46 -07002867static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2868 struct l2cap_ctrl *control)
2869{
2870 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002871 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
Mat Martineauf80842a2012-05-17 20:53:46 -07002872}
2873
Linus Torvalds1da177e2005-04-16 15:20:36 -07002874/* Copy frame to all raw sockets on that connection */
2875static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2876{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002877 struct sk_buff *nskb;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03002878 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002879
2880 BT_DBG("conn %p", conn);
2881
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002882 mutex_lock(&conn->chan_lock);
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002883
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002884 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002885 if (chan->chan_type != L2CAP_CHAN_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002886 continue;
2887
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002888 /* Don't send frame to the channel it came from */
Johan Hedberga4368ff2015-03-30 23:21:01 +03002889 if (bt_cb(skb)->l2cap.chan == chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002890 continue;
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002891
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002892 nskb = skb_clone(skb, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03002893 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002894 continue;
Gustavo Padovan80b98022012-05-27 22:27:51 -03002895 if (chan->ops->recv(chan, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002896 kfree_skb(nskb);
2897 }
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002898
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002899 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002900}
2901
2902/* ---- L2CAP signalling commands ---- */
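/* Allocate and fill an skb for a signalling command. Data beyond the
 * connection MTU is carried in continuation fragments chained on
 * frag_list, without additional L2CAP headers.
 */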
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002903static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2904 u8 ident, u16 dlen, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002905{
2906 struct sk_buff *skb, **frag;
2907 struct l2cap_cmd_hdr *cmd;
2908 struct l2cap_hdr *lh;
2909 int len, count;
2910
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002911 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2912 conn, code, ident, dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002913
Anderson Lizardo300b9622013-06-02 16:30:40 -04002914 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2915 return NULL;
2916
Linus Torvalds1da177e2005-04-16 15:20:36 -07002917 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2918 count = min_t(unsigned int, conn->mtu, len);
2919
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002920 skb = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002921 if (!skb)
2922 return NULL;
2923
2924 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002925 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002926
2927 if (conn->hcon->type == LE_LINK)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002928 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002929 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002930 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002931
2932 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2933 cmd->code = code;
2934 cmd->ident = ident;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002935 cmd->len = cpu_to_le16(dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002936
2937 if (dlen) {
2938 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2939 memcpy(skb_put(skb, count), data, count);
2940 data += count;
2941 }
2942
2943 len -= skb->len;
2944
2945 /* Continuation fragments (no L2CAP header) */
2946 frag = &skb_shinfo(skb)->frag_list;
2947 while (len) {
2948 count = min_t(unsigned int, conn->mtu, len);
2949
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002950 *frag = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002951 if (!*frag)
2952 goto fail;
2953
2954 memcpy(skb_put(*frag, count), data, count);
2955
2956 len -= count;
2957 data += count;
2958
2959 frag = &(*frag)->next;
2960 }
2961
2962 return skb;
2963
2964fail:
2965 kfree_skb(skb);
2966 return NULL;
2967}
2968
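/* Parse one type/length/value configuration option, advancing *ptr past
 * it and returning the total option length. Values of 1, 2 or 4 bytes
 * are returned by value; anything larger is returned as a pointer to
 * the option data.
 */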
Gustavo Padovan2d792812012-10-06 10:07:01 +01002969static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2970 unsigned long *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002971{
2972 struct l2cap_conf_opt *opt = *ptr;
2973 int len;
2974
2975 len = L2CAP_CONF_OPT_SIZE + opt->len;
2976 *ptr += len;
2977
2978 *type = opt->type;
2979 *olen = opt->len;
2980
2981 switch (opt->len) {
2982 case 1:
2983 *val = *((u8 *) opt->val);
2984 break;
2985
2986 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002987 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002988 break;
2989
2990 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002991 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002992 break;
2993
2994 default:
2995 *val = (unsigned long) opt->val;
2996 break;
2997 }
2998
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002999 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000 return len;
3001}
3002
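/* Append a type/length/value configuration option at *ptr. The size
 * argument is the space left in the output buffer; an option that does
 * not fit is silently dropped rather than written out of bounds.
 */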
Ben Seri6300c8b2017-09-09 23:15:59 +02003003static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val, size_t size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003004{
3005 struct l2cap_conf_opt *opt = *ptr;
3006
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03003007 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003008
Ben Seri6300c8b2017-09-09 23:15:59 +02003009 if (size < L2CAP_CONF_OPT_SIZE + len)
3010 return;
3011
Linus Torvalds1da177e2005-04-16 15:20:36 -07003012 opt->type = type;
3013 opt->len = len;
3014
3015 switch (len) {
3016 case 1:
3017 *((u8 *) opt->val) = val;
3018 break;
3019
3020 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02003021 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003022 break;
3023
3024 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02003025 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003026 break;
3027
3028 default:
3029 memcpy(opt->val, (void *) val, len);
3030 break;
3031 }
3032
3033 *ptr += L2CAP_CONF_OPT_SIZE + len;
3034}
3035
Ben Seri6300c8b2017-09-09 23:15:59 +02003036static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan, size_t size)
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003037{
3038 struct l2cap_conf_efs efs;
3039
Szymon Janc1ec918c2011-11-16 09:32:21 +01003040 switch (chan->mode) {
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003041 case L2CAP_MODE_ERTM:
3042 efs.id = chan->local_id;
3043 efs.stype = chan->local_stype;
3044 efs.msdu = cpu_to_le16(chan->local_msdu);
3045 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003046 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
3047 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003048 break;
3049
3050 case L2CAP_MODE_STREAMING:
3051 efs.id = 1;
3052 efs.stype = L2CAP_SERV_BESTEFFORT;
3053 efs.msdu = cpu_to_le16(chan->local_msdu);
3054 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
3055 efs.acc_lat = 0;
3056 efs.flush_to = 0;
3057 break;
3058
3059 default:
3060 return;
3061 }
3062
3063 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
Ben Seri6300c8b2017-09-09 23:15:59 +02003064 (unsigned long) &efs, size);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003065}
3066
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003067static void l2cap_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003068{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003069 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau03625202012-05-17 20:53:51 -07003070 ack_timer.work);
3071 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003072
Gustavo F. Padovan2fb9b3d2011-12-22 16:56:05 -02003073 BT_DBG("chan %p", chan);
3074
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003075 l2cap_chan_lock(chan);
3076
Mat Martineau03625202012-05-17 20:53:51 -07003077 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3078 chan->last_acked_seq);
3079
3080 if (frames_to_ack)
3081 l2cap_send_rr_or_rnr(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003082
3083 l2cap_chan_unlock(chan);
Szymon Janc09bfb2e2012-01-11 10:59:49 +01003084 l2cap_chan_put(chan);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003085}
3086
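/* Reset sequence numbers, SDU reassembly and AMP move state for the
 * channel. For ERTM mode this also initializes the retransmission,
 * monitor and ack timers and allocates the SREJ and retransmission
 * sequence lists.
 */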
Andrei Emeltchenko466f8002012-05-29 13:59:01 +03003087int l2cap_ertm_init(struct l2cap_chan *chan)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003088{
Mat Martineau3c588192012-04-11 10:48:42 -07003089 int err;
3090
Mat Martineau105bdf92012-04-27 16:50:48 -07003091 chan->next_tx_seq = 0;
3092 chan->expected_tx_seq = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003093 chan->expected_ack_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003094 chan->unacked_frames = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003095 chan->buffer_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003096 chan->frames_sent = 0;
Mat Martineau105bdf92012-04-27 16:50:48 -07003097 chan->last_acked_seq = 0;
3098 chan->sdu = NULL;
3099 chan->sdu_last_frag = NULL;
3100 chan->sdu_len = 0;
3101
Mat Martineaud34c34f2012-05-14 14:49:27 -07003102 skb_queue_head_init(&chan->tx_q);
3103
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003104 chan->local_amp_id = AMP_ID_BREDR;
3105 chan->move_id = AMP_ID_BREDR;
Mat Martineau08333282012-10-23 15:24:06 -07003106 chan->move_state = L2CAP_MOVE_STABLE;
3107 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3108
Mat Martineau105bdf92012-04-27 16:50:48 -07003109 if (chan->mode != L2CAP_MODE_ERTM)
3110 return 0;
3111
3112 chan->rx_state = L2CAP_RX_STATE_RECV;
3113 chan->tx_state = L2CAP_TX_STATE_XMIT;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003114
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003115 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3116 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3117 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003118
Gustavo F. Padovanf1c67752011-03-25 20:36:10 -03003119 skb_queue_head_init(&chan->srej_q);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03003120
Mat Martineau3c588192012-04-11 10:48:42 -07003121 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3122 if (err < 0)
3123 return err;
3124
Mat Martineau9dc9aff2012-05-17 16:20:14 -07003125 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3126 if (err < 0)
3127 l2cap_seq_list_free(&chan->srej_list);
3128
3129 return err;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003130}
3131
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003132static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3133{
3134 switch (mode) {
3135 case L2CAP_MODE_STREAMING:
3136 case L2CAP_MODE_ERTM:
3137 if (l2cap_mode_supported(mode, remote_feat_mask))
3138 return mode;
3139 /* fall through */
3140 default:
3141 return L2CAP_MODE_BASIC;
3142 }
3143}
3144
Marcel Holtmann848566b2013-10-01 22:59:22 -07003145static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003146{
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02003147 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3148 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003149}
3150
Marcel Holtmann848566b2013-10-01 22:59:22 -07003151static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003152{
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02003153 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3154 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003155}
3156
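/* Choose ERTM retransmission and monitor timeouts. On an AMP
 * (high-speed) link they are derived from the controller's best-effort
 * flush timeout; otherwise the default values are used.
 */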
Mat Martineau36c86c82012-10-23 15:24:20 -07003157static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3158 struct l2cap_conf_rfc *rfc)
3159{
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003160 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
Mat Martineau36c86c82012-10-23 15:24:20 -07003161 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3162
3163 /* Class 1 devices must have ERTM timeouts
3164 * exceeding the Link Supervision Timeout. The
3165 * default Link Supervision Timeout for AMP
3166 * controllers is 10 seconds.
3167 *
3168 * Class 1 devices use 0xffffffff for their
3169 * best-effort flush timeout, so the clamping logic
3170 * will result in a timeout that meets the above
3171 * requirement. ERTM timeouts are 16-bit values, so
3172 * the maximum timeout is 65.535 seconds.
3173 */
3174
3175 /* Convert timeout to milliseconds and round */
3176 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3177
3178 /* This is the recommended formula for class 2 devices
3179 * that start ERTM timers when packets are sent to the
3180 * controller.
3181 */
3182 ertm_to = 3 * ertm_to + 500;
3183
3184 if (ertm_to > 0xffff)
3185 ertm_to = 0xffff;
3186
3187 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3188 rfc->monitor_timeout = rfc->retrans_timeout;
3189 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003190 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3191 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
Mat Martineau36c86c82012-10-23 15:24:20 -07003192 }
3193}
3194
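/* If the requested tx window exceeds the default and the connection
 * supports the extended window size option, switch to the extended
 * control field; otherwise clamp tx_win to the default window. The ack
 * window mirrors the resulting tx window.
 */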
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003195static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3196{
3197 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
Marcel Holtmann848566b2013-10-01 22:59:22 -07003198 __l2cap_ews_supported(chan->conn)) {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003199 /* use extended control field */
3200 set_bit(FLAG_EXT_CTRL, &chan->flags);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003201 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3202 } else {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003203 chan->tx_win = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003204 L2CAP_DEFAULT_TX_WINDOW);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003205 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3206 }
Mat Martineauc20f8e32012-07-10 05:47:07 -07003207 chan->ack_win = chan->tx_win;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003208}
3209
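/* Build an outgoing Configure Request in data, bounded by data_size:
 * the MTU option plus mode-specific RFC, FCS, EFS and extended window
 * size options.
 */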
Ben Seri6300c8b2017-09-09 23:15:59 +02003210static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003211{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003212 struct l2cap_conf_req *req = data;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003213 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003214 void *ptr = req->data;
Ben Seri6300c8b2017-09-09 23:15:59 +02003215 void *endptr = data + data_size;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003216 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003217
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003218 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003219
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003220 if (chan->num_conf_req || chan->num_conf_rsp)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003221 goto done;
3222
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003223 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003224 case L2CAP_MODE_STREAMING:
3225 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003226 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003227 break;
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003228
Marcel Holtmann848566b2013-10-01 22:59:22 -07003229 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003230 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3231
Gustavo F. Padovan2ba13ed2010-06-09 16:39:05 -03003232 /* fall through */
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003233 default:
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003234 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003235 break;
3236 }
3237
3238done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003239 if (chan->imtu != L2CAP_DEFAULT_MTU)
Ben Seri6300c8b2017-09-09 23:15:59 +02003240 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu, endptr - ptr);
Gustavo F. Padovan7990681c2011-01-24 16:01:43 -02003241
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003242 switch (chan->mode) {
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003243 case L2CAP_MODE_BASIC:
Marcel Holtmann6fea7ad2014-07-09 11:53:35 +02003244 if (disable_ertm)
3245 break;
3246
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003247 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003248 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003249 break;
3250
Gustavo F. Padovan62547752010-06-08 20:05:31 -03003251 rfc.mode = L2CAP_MODE_BASIC;
3252 rfc.txwin_size = 0;
3253 rfc.max_transmit = 0;
3254 rfc.retrans_timeout = 0;
3255 rfc.monitor_timeout = 0;
3256 rfc.max_pdu_size = 0;
3257
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003258 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Ben Seri6300c8b2017-09-09 23:15:59 +02003259 (unsigned long) &rfc, endptr - ptr);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003260 break;
3261
3262 case L2CAP_MODE_ERTM:
3263 rfc.mode = L2CAP_MODE_ERTM;
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003264 rfc.max_transmit = chan->max_tx;
Mat Martineau36c86c82012-10-23 15:24:20 -07003265
3266 __l2cap_set_ertm_timeouts(chan, &rfc);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003267
3268 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003269 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3270 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003271 rfc.max_pdu_size = cpu_to_le16(size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003272
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003273 l2cap_txwin_setup(chan);
3274
3275 rfc.txwin_size = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003276 L2CAP_DEFAULT_TX_WINDOW);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003277
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003278 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Ben Seri6300c8b2017-09-09 23:15:59 +02003279 (unsigned long) &rfc, endptr - ptr);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003280
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003281 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
Ben Seri6300c8b2017-09-09 23:15:59 +02003282 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003283
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003284 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3285 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Ben Seri6300c8b2017-09-09 23:15:59 +02003286 chan->tx_win, endptr - ptr);
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003287
3288 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3289 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003290 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003291 chan->fcs = L2CAP_FCS_NONE;
3292 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
Ben Seri6300c8b2017-09-09 23:15:59 +02003293 chan->fcs, endptr - ptr);
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003294 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003295 break;
3296
3297 case L2CAP_MODE_STREAMING:
Mat Martineau273759e2012-05-17 20:53:53 -07003298 l2cap_txwin_setup(chan);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003299 rfc.mode = L2CAP_MODE_STREAMING;
3300 rfc.txwin_size = 0;
3301 rfc.max_transmit = 0;
3302 rfc.retrans_timeout = 0;
3303 rfc.monitor_timeout = 0;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003304
3305 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003306 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3307 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003308 rfc.max_pdu_size = cpu_to_le16(size);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003309
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003310 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Ben Seri6300c8b2017-09-09 23:15:59 +02003311 (unsigned long) &rfc, endptr - ptr);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003312
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003313 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
Ben Seri6300c8b2017-09-09 23:15:59 +02003314 l2cap_add_opt_efs(&ptr, chan, endptr - ptr);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003315
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003316 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3317 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003318 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003319 chan->fcs = L2CAP_FCS_NONE;
3320 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
Ben Seri6300c8b2017-09-09 23:15:59 +02003321 chan->fcs, endptr - ptr);
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003322 }
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003323 break;
3324 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003325
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003326 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003327 req->flags = cpu_to_le16(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003328
3329 return ptr - data;
3330}
3331
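/* Parse the peer's Configure Request buffered in chan->conf_req and
 * build our Configure Response in data: accept or adjust the offered
 * MTU, mode (RFC), FCS, EFS and extended window size, and report any
 * unknown non-hint options as unrecognized.
 */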
Ben Seri6300c8b2017-09-09 23:15:59 +02003332static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data, size_t data_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003333{
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003334 struct l2cap_conf_rsp *rsp = data;
3335 void *ptr = rsp->data;
Ben Seri6300c8b2017-09-09 23:15:59 +02003336 void *endptr = data + data_size;
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003337 void *req = chan->conf_req;
3338 int len = chan->conf_len;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003339 int type, hint, olen;
3340 unsigned long val;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003341 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003342 struct l2cap_conf_efs efs;
3343 u8 remote_efs = 0;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003344 u16 mtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003345 u16 result = L2CAP_CONF_SUCCESS;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003346 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003347
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003348 BT_DBG("chan %p", chan);
Marcel Holtmann820ae1b2006-11-18 22:15:00 +01003349
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003350 while (len >= L2CAP_CONF_OPT_SIZE) {
3351 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
Marcel Holtmann99665dc2019-01-18 13:43:19 +01003352 if (len < 0)
3353 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003354
Gustavo F. Padovan589d2742009-04-20 01:31:07 -03003355 hint = type & L2CAP_CONF_HINT;
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07003356 type &= L2CAP_CONF_MASK;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003357
3358 switch (type) {
3359 case L2CAP_CONF_MTU:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003360 if (olen != 2)
3361 break;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003362 mtu = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003363 break;
3364
3365 case L2CAP_CONF_FLUSH_TO:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003366 if (olen != 2)
3367 break;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003368 chan->flush_to = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003369 break;
3370
3371 case L2CAP_CONF_QOS:
3372 break;
3373
Marcel Holtmann6464f352007-10-20 13:39:51 +02003374 case L2CAP_CONF_RFC:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003375 if (olen != sizeof(rfc))
3376 break;
3377 memcpy(&rfc, (void *) val, olen);
Marcel Holtmann6464f352007-10-20 13:39:51 +02003378 break;
3379
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003380 case L2CAP_CONF_FCS:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003381 if (olen != 1)
3382 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003383 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003384 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003385 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003386
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003387 case L2CAP_CONF_EFS:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003388 if (olen != sizeof(efs))
3389 break;
3390 remote_efs = 1;
3391 memcpy(&efs, (void *) val, olen);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003392 break;
3393
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003394 case L2CAP_CONF_EWS:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003395 if (olen != 2)
3396 break;
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02003397 if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003398 return -ECONNREFUSED;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003399 set_bit(FLAG_EXT_CTRL, &chan->flags);
3400 set_bit(CONF_EWS_RECV, &chan->conf_state);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003401 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003402 chan->remote_tx_win = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003403 break;
3404
3405 default:
3406 if (hint)
3407 break;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003408 result = L2CAP_CONF_UNKNOWN;
3409 *((u8 *) ptr++) = type;
3410 break;
3411 }
3412 }
3413
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003414 if (chan->num_conf_rsp || chan->num_conf_req > 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003415 goto done;
3416
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003417 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003418 case L2CAP_MODE_STREAMING:
3419 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003420 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003421 chan->mode = l2cap_select_mode(rfc.mode,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003422 chan->conn->feat_mask);
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003423 break;
3424 }
3425
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003426 if (remote_efs) {
Marcel Holtmann848566b2013-10-01 22:59:22 -07003427 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003428 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3429 else
3430 return -ECONNREFUSED;
3431 }
3432
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003433 if (chan->mode != rfc.mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003434 return -ECONNREFUSED;
Gustavo F. Padovan742e5192010-06-08 19:09:48 -03003435
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003436 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003437 }
3438
3439done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003440 if (chan->mode != rfc.mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003441 result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003442 rfc.mode = chan->mode;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003443
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003444 if (chan->num_conf_rsp == 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003445 return -ECONNREFUSED;
3446
Gustavo Padovan2d792812012-10-06 10:07:01 +01003447 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Ben Seri6300c8b2017-09-09 23:15:59 +02003448 (unsigned long) &rfc, endptr - ptr);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003449 }
3450
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003451 if (result == L2CAP_CONF_SUCCESS) {
3452 /* Configure output options and let the other side know
3453 * which ones we don't like. */
3454
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003455 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3456 result = L2CAP_CONF_UNACCEPT;
3457 else {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003458 chan->omtu = mtu;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003459 set_bit(CONF_MTU_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003460 }
Ben Seri6300c8b2017-09-09 23:15:59 +02003461 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu, endptr - ptr);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003462
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003463 if (remote_efs) {
3464 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003465 efs.stype != L2CAP_SERV_NOTRAFIC &&
3466 efs.stype != chan->local_stype) {
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003467
3468 result = L2CAP_CONF_UNACCEPT;
3469
3470 if (chan->num_conf_req >= 1)
3471 return -ECONNREFUSED;
3472
3473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003474 sizeof(efs),
Ben Seri6300c8b2017-09-09 23:15:59 +02003475 (unsigned long) &efs, endptr - ptr);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003476 } else {
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003477 /* Send PENDING Conf Rsp */
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003478 result = L2CAP_CONF_PENDING;
3479 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003480 }
3481 }
3482
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003483 switch (rfc.mode) {
3484 case L2CAP_MODE_BASIC:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003485 chan->fcs = L2CAP_FCS_NONE;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003486 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003487 break;
3488
3489 case L2CAP_MODE_ERTM:
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003490 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3491 chan->remote_tx_win = rfc.txwin_size;
3492 else
3493 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3494
Gustavo F. Padovan2c03a7a2011-03-25 20:15:28 -03003495 chan->remote_max_tx = rfc.max_transmit;
Mat Martineau86b1b262010-08-05 15:54:22 -07003496
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003497 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003498 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3499 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003500 rfc.max_pdu_size = cpu_to_le16(size);
3501 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003502
Mat Martineau36c86c82012-10-23 15:24:20 -07003503 __l2cap_set_ertm_timeouts(chan, &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003504
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003505 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003506
3507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Ben Seri6300c8b2017-09-09 23:15:59 +02003508 sizeof(rfc), (unsigned long) &rfc, endptr - ptr);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003509
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003510 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3511 chan->remote_id = efs.id;
3512 chan->remote_stype = efs.stype;
3513 chan->remote_msdu = le16_to_cpu(efs.msdu);
3514 chan->remote_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003515 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003516 chan->remote_acc_lat =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003517 le32_to_cpu(efs.acc_lat);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003518 chan->remote_sdu_itime =
3519 le32_to_cpu(efs.sdu_itime);
3520 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003521 sizeof(efs),
Ben Seri6300c8b2017-09-09 23:15:59 +02003522 (unsigned long) &efs, endptr - ptr);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003523 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003524 break;
3525
3526 case L2CAP_MODE_STREAMING:
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003527 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003528 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3529 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003530 rfc.max_pdu_size = cpu_to_le16(size);
3531 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003532
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003533 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003534
Gustavo Padovan2d792812012-10-06 10:07:01 +01003535 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Ben Seri6300c8b2017-09-09 23:15:59 +02003536 (unsigned long) &rfc, endptr - ptr);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003537
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003538 break;
3539
3540 default:
Marcel Holtmann6464f352007-10-20 13:39:51 +02003541 result = L2CAP_CONF_UNACCEPT;
3542
3543 memset(&rfc, 0, sizeof(rfc));
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003544 rfc.mode = chan->mode;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003545 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003546
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003547 if (result == L2CAP_CONF_SUCCESS)
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003548 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003549 }
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003550 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003551 rsp->result = cpu_to_le16(result);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003552 rsp->flags = cpu_to_le16(0);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003553
3554 return ptr - data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003555}
3556
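/* Parse the peer's Configure Response and build the follow-up Configure
 * Request in data, adopting the MTU, flush timeout, RFC, EWS and EFS
 * values the peer returned.
 */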
Gustavo Padovan2d792812012-10-06 10:07:01 +01003557static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
Ben Seri6300c8b2017-09-09 23:15:59 +02003558 void *data, size_t size, u16 *result)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003559{
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003560 struct l2cap_conf_req *req = data;
3561 void *ptr = req->data;
Ben Seri6300c8b2017-09-09 23:15:59 +02003562 void *endptr = data + size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003563 int type, olen;
3564 unsigned long val;
Mat Martineau36e999a2011-12-08 17:23:21 -08003565 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003566 struct l2cap_conf_efs efs;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003567
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003568 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003569
3570 while (len >= L2CAP_CONF_OPT_SIZE) {
3571 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
Marcel Holtmann99665dc2019-01-18 13:43:19 +01003572 if (len < 0)
3573 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003574
3575 switch (type) {
3576 case L2CAP_CONF_MTU:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003577 if (olen != 2)
3578 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003579 if (val < L2CAP_DEFAULT_MIN_MTU) {
3580 *result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003581 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003582 } else
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003583 chan->imtu = val;
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003584 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu,
3585 endptr - ptr);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003586 break;
3587
3588 case L2CAP_CONF_FLUSH_TO:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003589 if (olen != 2)
3590 break;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003591 chan->flush_to = val;
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003592 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2,
3593 chan->flush_to, endptr - ptr);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003594 break;
3595
3596 case L2CAP_CONF_RFC:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003597 if (olen != sizeof(rfc))
3598 break;
3599 memcpy(&rfc, (void *)val, olen);
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003600 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003601 rfc.mode != chan->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003602 return -ECONNREFUSED;
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003603 chan->fcs = 0;
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003604 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3605 (unsigned long) &rfc, endptr - ptr);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003606 break;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003607
3608 case L2CAP_CONF_EWS:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003609 if (olen != 2)
3610 break;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003611 chan->ack_win = min_t(u16, val, chan->ack_win);
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Ben Seri6300c8b2017-09-09 23:15:59 +02003613 chan->tx_win, endptr - ptr);
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003614 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003615
3616 case L2CAP_CONF_EFS:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003617 if (olen != sizeof(efs))
3618 break;
3619 memcpy(&efs, (void *)val, olen);
3620 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
3621 efs.stype != L2CAP_SERV_NOTRAFIC &&
3622 efs.stype != chan->local_stype)
3623 return -ECONNREFUSED;
3624 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3625 (unsigned long) &efs, endptr - ptr);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003626 break;
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003627
3628 case L2CAP_CONF_FCS:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003629 if (olen != 1)
3630 break;
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003631 if (*result == L2CAP_CONF_PENDING)
3632 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003633 set_bit(CONF_RECV_NO_FCS,
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003634 &chan->conf_state);
3635 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003636 }
3637 }
3638
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003639 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003640 return -ECONNREFUSED;
3641
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003642 chan->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003643
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003644 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003645 switch (rfc.mode) {
3646 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003647 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3648 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3649 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003650 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3651 chan->ack_win = min_t(u16, chan->ack_win,
3652 rfc.txwin_size);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003653
3654 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3655 chan->local_msdu = le16_to_cpu(efs.msdu);
3656 chan->local_sdu_itime =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003657 le32_to_cpu(efs.sdu_itime);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003658 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3659 chan->local_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003660 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003661 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003662 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003663
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003664 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003665 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003666 }
3667 }
3668
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003669 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003670 req->flags = cpu_to_le16(0);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003671
3672 return ptr - data;
3673}
3674
Gustavo Padovan2d792812012-10-06 10:07:01 +01003675static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3676 u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003677{
3678 struct l2cap_conf_rsp *rsp = data;
3679 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003680
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003681 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003682
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003683 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003684 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003685 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003686
3687 return ptr - data;
3688}
3689
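/* Send the deferred LE Credit Based Connection Response once the channel
 * has been accepted locally (typically after a deferred, BT_DEFER_SETUP
 * style accept).  The response advertises our local MTU, MPS and initial
 * rx credits for the new dynamic channel.
 */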
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003690void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3691{
3692 struct l2cap_le_conn_rsp rsp;
3693 struct l2cap_conn *conn = chan->conn;
3694
3695 BT_DBG("chan %p", chan);
3696
3697 rsp.dcid = cpu_to_le16(chan->scid);
3698 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02003699 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03003700 rsp.credits = cpu_to_le16(chan->rx_credits);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003701 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003702
3703 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3704 &rsp);
3705}
3706
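/* Send the deferred BR/EDR Connection Response (or Create Channel
 * Response when the channel sits on an AMP link, i.e. hs_hcon is set)
 * and, if none has been sent yet, kick off configuration by sending the
 * first Configure Request.
 */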
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003707void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003708{
3709 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003710 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003711 u8 buf[128];
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003712 u8 rsp_code;
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003713
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003714 rsp.scid = cpu_to_le16(chan->dcid);
3715 rsp.dcid = cpu_to_le16(chan->scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003716 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3717 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003718
3719 if (chan->hs_hcon)
3720 rsp_code = L2CAP_CREATE_CHAN_RSP;
3721 else
3722 rsp_code = L2CAP_CONN_RSP;
3723
3724 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3725
3726 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003727
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003728 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003729 return;
3730
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003731 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Ben Seri6300c8b2017-09-09 23:15:59 +02003732 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
Gustavo F. Padovan710f9b0a2011-03-25 14:30:37 -03003733 chan->num_conf_req++;
3734}
3735
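/* Extract the RFC and extended window size options from a successful
 * Configure Response and apply them to the channel.  Only meaningful for
 * ERTM and streaming mode; sane defaults are used when the remote side
 * omitted the options (see the comment below).
 */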
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003736static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003737{
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003738 int type, olen;
3739 unsigned long val;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003740 /* Use sane default values in case a misbehaving remote device
3741 * did not send an RFC or extended window size option.
3742 */
3743 u16 txwin_ext = chan->ack_win;
3744 struct l2cap_conf_rfc rfc = {
3745 .mode = chan->mode,
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003746 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3747 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
Mat Martineauc20f8e32012-07-10 05:47:07 -07003748 .max_pdu_size = cpu_to_le16(chan->imtu),
3749 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3750 };
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003751
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003752 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003753
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003754 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003755 return;
3756
3757 while (len >= L2CAP_CONF_OPT_SIZE) {
3758 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
Marcel Holtmann99665dc2019-01-18 13:43:19 +01003759 if (len < 0)
3760 break;
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003761
Mat Martineauc20f8e32012-07-10 05:47:07 -07003762 switch (type) {
3763 case L2CAP_CONF_RFC:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003764 if (olen != sizeof(rfc))
3765 break;
3766 memcpy(&rfc, (void *)val, olen);
Szymon Janc8f321f82012-06-08 11:33:33 +02003767 break;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003768 case L2CAP_CONF_EWS:
Marcel Holtmanndef5c1f2019-01-18 12:56:20 +01003769 if (olen != 2)
3770 break;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003771 txwin_ext = val;
3772 break;
3773 }
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003774 }
3775
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003776 switch (rfc.mode) {
3777 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003778 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3779 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003780 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3781 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3782 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3783 else
3784 chan->ack_win = min_t(u16, chan->ack_win,
3785 rfc.txwin_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003786 break;
3787 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003788 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003789 }
3790}
3791
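/* Handle an incoming Command Reject.  The only case acted upon is a
 * "command not understood" reject of our outstanding Information
 * Request: treat it as the end of the feature-mask exchange and start
 * any channels that were waiting for it.
 */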
Gustavo Padovan2d792812012-10-06 10:07:01 +01003792static inline int l2cap_command_rej(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003793 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3794 u8 *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003795{
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003796 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003797
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003798 if (cmd_len < sizeof(*rej))
3799 return -EPROTO;
3800
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003801 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003802 return 0;
3803
3804 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003805 cmd->ident == conn->info_ident) {
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02003806 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003807
3808 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003809 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003810
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003811 l2cap_conn_start(conn);
3812 }
3813
3814 return 0;
3815}
3816
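/* Common handling for an incoming Connection Request (and, via
 * l2cap_create_channel_req(), an AMP Create Channel Request): look up a
 * listening channel for the PSM, create the new channel and decide
 * whether to answer with success, pending (authentication/authorization
 * still outstanding) or a rejection.  Returns the new channel, if any,
 * so the caller can finish AMP-specific setup.
 */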
Mat Martineau17009152012-10-23 15:24:07 -07003817static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3818 struct l2cap_cmd_hdr *cmd,
3819 u8 *data, u8 rsp_code, u8 amp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003820{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003821 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3822 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003823 struct l2cap_chan *chan = NULL, *pchan;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003824 int result, status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003825
3826 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003827 __le16 psm = req->psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828
Andrei Emeltchenko097db762012-03-09 14:16:17 +02003829 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003830
	/* Check if we have a socket listening on this PSM */
Marcel Holtmann6f59b902013-10-13 05:24:01 -07003832 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03003833 &conn->hcon->dst, ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003834 if (!pchan) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003835 result = L2CAP_CR_BAD_PSM;
3836 goto sendresp;
3837 }
3838
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003839 mutex_lock(&conn->chan_lock);
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003840 l2cap_chan_lock(pchan);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00003841
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003842 /* Check if the ACL is secure enough (if not SDP) */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003843 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003844 !hci_conn_check_link_mode(conn->hcon)) {
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02003845 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003846 result = L2CAP_CR_SEC_BLOCK;
3847 goto response;
3848 }
3849
Linus Torvalds1da177e2005-04-16 15:20:36 -07003850 result = L2CAP_CR_NO_MEM;
3851
	/* Check if we already have a channel with that dcid */
3853 if (__l2cap_get_chan_by_dcid(conn, scid))
3854 goto response;
3855
Gustavo Padovan80b98022012-05-27 22:27:51 -03003856 chan = pchan->ops->new_connection(pchan);
Gustavo F. Padovan80808e42011-05-16 17:24:37 -03003857 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003858 goto response;
3859
	/* For certain devices (e.g. an HID mouse), support for authentication,
	 * pairing and bonding is optional.  For such devices, in order to avoid
	 * keeping the ACL alive for too long after L2CAP disconnection, reset
	 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
3865 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3866
Marcel Holtmann7eafc592013-10-13 08:12:47 -07003867 bacpy(&chan->src, &conn->hcon->src);
3868 bacpy(&chan->dst, &conn->hcon->dst);
Johan Hedberga250e042015-01-15 13:06:44 +02003869 chan->src_type = bdaddr_src_type(conn->hcon);
3870 chan->dst_type = bdaddr_dst_type(conn->hcon);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003871 chan->psm = psm;
3872 chan->dcid = scid;
Mat Martineau17009152012-10-23 15:24:07 -07003873 chan->local_amp_id = amp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003874
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003875 __l2cap_chan_add(conn, chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003876
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003877 dcid = chan->scid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003878
Gustavo Padovan8d836d72013-10-15 19:24:47 -03003879 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003880
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003881 chan->ident = cmd->ident;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003882
Marcel Holtmann984947d2009-02-06 23:35:19 +01003883 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03003884 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07003885 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003886 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003887 result = L2CAP_CR_PEND;
3888 status = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08003889 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003890 } else {
Mat Martineau17009152012-10-23 15:24:07 -07003891 /* Force pending result for AMP controllers.
3892 * The connection will succeed after the
3893 * physical link is up.
3894 */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003895 if (amp_id == AMP_ID_BREDR) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003896 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau17009152012-10-23 15:24:07 -07003897 result = L2CAP_CR_SUCCESS;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003898 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003899 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003900 result = L2CAP_CR_PEND;
Mat Martineau17009152012-10-23 15:24:07 -07003901 }
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003902 status = L2CAP_CS_NO_INFO;
3903 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003904 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003905 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003906 result = L2CAP_CR_PEND;
3907 status = L2CAP_CS_AUTHEN_PEND;
3908 }
3909 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003910 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003911 result = L2CAP_CR_PEND;
3912 status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003913 }
3914
Linus Torvalds1da177e2005-04-16 15:20:36 -07003915response:
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003916 l2cap_chan_unlock(pchan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003917 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03003918 l2cap_chan_put(pchan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003919
3920sendresp:
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003921 rsp.scid = cpu_to_le16(scid);
3922 rsp.dcid = cpu_to_le16(dcid);
3923 rsp.result = cpu_to_le16(result);
3924 rsp.status = cpu_to_le16(status);
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003925 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003926
3927 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3928 struct l2cap_info_req info;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003929 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003930
3931 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3932 conn->info_ident = l2cap_get_ident(conn);
3933
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08003934 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003935
Gustavo Padovan2d792812012-10-06 10:07:01 +01003936 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3937 sizeof(info), &info);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003938 }
3939
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003940 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003941 result == L2CAP_CR_SUCCESS) {
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003942 u8 buf[128];
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003943 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003944 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Ben Seri6300c8b2017-09-09 23:15:59 +02003945 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003946 chan->num_conf_req++;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003947 }
Mat Martineau17009152012-10-23 15:24:07 -07003948
3949 return chan;
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003950}
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003951
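/* L2CAP_CONN_REQ handler: notify the management core that the remote
 * device is connected and hand the request to l2cap_connect() with a
 * plain Connection Response as the reply code.
 */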
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003952static int l2cap_connect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003953 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003954{
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303955 struct hci_dev *hdev = conn->hcon->hdev;
3956 struct hci_conn *hcon = conn->hcon;
3957
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003958 if (cmd_len < sizeof(struct l2cap_conn_req))
3959 return -EPROTO;
3960
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303961 hci_dev_lock(hdev);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003962 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303963 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
Alfonso Acosta48ec92f2014-10-07 08:44:10 +00003964 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303965 hci_dev_unlock(hdev);
3966
Gustavo Padovan300229f2012-10-12 19:40:40 +08003967 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003968 return 0;
3969}
3970
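/* Handler shared by Connection Response and Create Channel Response.
 * On success the channel moves to BT_CONFIG and the first Configure
 * Request goes out; a pending result just marks the channel as waiting,
 * and any other result tears the channel down.
 */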
Mat Martineau5909cf32012-10-23 15:24:08 -07003971static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003972 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3973 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003974{
3975 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3976 u16 scid, dcid, result, status;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003977 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003978 u8 req[128];
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003979 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003980
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003981 if (cmd_len < sizeof(*rsp))
3982 return -EPROTO;
3983
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 scid = __le16_to_cpu(rsp->scid);
3985 dcid = __le16_to_cpu(rsp->dcid);
3986 result = __le16_to_cpu(rsp->result);
3987 status = __le16_to_cpu(rsp->status);
3988
Andrei Emeltchenko1b009c92012-02-21 12:54:54 +02003989 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01003990 dcid, scid, result, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003991
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003992 mutex_lock(&conn->chan_lock);
3993
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994 if (scid) {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003995 chan = __l2cap_get_chan_by_scid(conn, scid);
3996 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003997 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003998 goto unlock;
3999 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004000 } else {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004001 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
4002 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03004003 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004004 goto unlock;
4005 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004006 }
4007
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004008 err = 0;
4009
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004010 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004011
Linus Torvalds1da177e2005-04-16 15:20:36 -07004012 switch (result) {
4013 case L2CAP_CR_SUCCESS:
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03004014 l2cap_state_change(chan, BT_CONFIG);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03004015 chan->ident = 0;
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03004016 chan->dcid = dcid;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004017 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01004018
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004019 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004020 break;
4021
Linus Torvalds1da177e2005-04-16 15:20:36 -07004022 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Ben Seri6300c8b2017-09-09 23:15:59 +02004023 l2cap_build_conf_req(chan, req, sizeof(req)), req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004024 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004025 break;
4026
4027 case L2CAP_CR_PEND:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004028 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029 break;
4030
4031 default:
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004032 l2cap_chan_del(chan, ECONNREFUSED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 break;
4034 }
4035
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004036 l2cap_chan_unlock(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004037
4038unlock:
4039 mutex_unlock(&conn->chan_lock);
4040
4041 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042}
4043
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004044static inline void set_default_fcs(struct l2cap_chan *chan)
Mat Martineau8c462b62010-08-24 15:35:42 -07004045{
4046 /* FCS is enabled only in ERTM or streaming mode, if one or both
4047 * sides request it.
4048 */
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03004049 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004050 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02004051 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004052 chan->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07004053}
4054
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03004055static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
4056 u8 ident, u16 flags)
4057{
4058 struct l2cap_conn *conn = chan->conn;
4059
4060 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
4061 flags);
4062
4063 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4064 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4065
4066 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4067 l2cap_build_conf_rsp(chan, data,
4068 L2CAP_CONF_SUCCESS, flags), data);
4069}
4070
Johan Hedberg662d6522013-10-16 11:20:47 +03004071static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4072 u16 scid, u16 dcid)
4073{
4074 struct l2cap_cmd_rej_cid rej;
4075
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004076 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
Johan Hedberg662d6522013-10-16 11:20:47 +03004077 rej.scid = __cpu_to_le16(scid);
4078 rej.dcid = __cpu_to_le16(dcid);
4079
4080 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4081}
4082
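/* L2CAP_CONF_REQ handler.  Configure Requests may be split over several
 * PDUs (continuation bit in "flags"), so fragments are accumulated in
 * chan->conf_req until the final one arrives and then parsed in one go
 * by l2cap_parse_conf_req().  Once both directions are configured
 * (CONF_INPUT_DONE and CONF_OUTPUT_DONE) the channel is made ready.
 */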
Gustavo Padovan2d792812012-10-06 10:07:01 +01004083static inline int l2cap_config_req(struct l2cap_conn *conn,
4084 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4085 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086{
4087 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4088 u16 dcid, flags;
4089 u8 rsp[64];
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004090 struct l2cap_chan *chan;
Mat Martineau3c588192012-04-11 10:48:42 -07004091 int len, err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004092
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004093 if (cmd_len < sizeof(*req))
4094 return -EPROTO;
4095
Linus Torvalds1da177e2005-04-16 15:20:36 -07004096 dcid = __le16_to_cpu(req->dcid);
4097 flags = __le16_to_cpu(req->flags);
4098
4099 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4100
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004101 chan = l2cap_get_chan_by_scid(conn, dcid);
Johan Hedberg662d6522013-10-16 11:20:47 +03004102 if (!chan) {
4103 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4104 return 0;
4105 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106
David S. Miller033b1142011-07-21 13:38:42 -07004107 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
Johan Hedberg662d6522013-10-16 11:20:47 +03004108 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4109 chan->dcid);
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004110 goto unlock;
Gustavo F. Padovandf6bd742010-06-14 02:26:15 -03004111 }
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004112
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004113 /* Reject if config buffer is too small. */
Al Viro88219a02007-07-29 00:17:25 -07004114 len = cmd_len - sizeof(*req);
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004115 if (chan->conf_len + len > sizeof(chan->conf_req)) {
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004116 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004117 l2cap_build_conf_rsp(chan, rsp,
4118 L2CAP_CONF_REJECT, flags), rsp);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004119 goto unlock;
4120 }
4121
4122 /* Store config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004123 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4124 chan->conf_len += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004126 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127 /* Incomplete config. Send empty response. */
4128 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004129 l2cap_build_conf_rsp(chan, rsp,
4130 L2CAP_CONF_SUCCESS, flags), rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131 goto unlock;
4132 }
4133
4134 /* Complete config. */
Ben Seri6300c8b2017-09-09 23:15:59 +02004135 len = l2cap_parse_conf_req(chan, rsp, sizeof(rsp));
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004136 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004137 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138 goto unlock;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004139 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004140
Mat Martineau1500109b2012-10-23 15:24:15 -07004141 chan->ident = cmd->ident;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004142 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004143 chan->num_conf_rsp++;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004144
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004145 /* Reset config buffer. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004146 chan->conf_len = 0;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004147
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004148 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
Marcel Holtmann876d9482007-10-20 13:35:42 +02004149 goto unlock;
4150
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004151 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004152 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004153
Mat Martineau105bdf92012-04-27 16:50:48 -07004154 if (chan->mode == L2CAP_MODE_ERTM ||
4155 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004156 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004157
Mat Martineau3c588192012-04-11 10:48:42 -07004158 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004159 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004160 else
4161 l2cap_chan_ready(chan);
4162
Marcel Holtmann876d9482007-10-20 13:35:42 +02004163 goto unlock;
4164 }
4165
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004166 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004167 u8 buf[64];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004168 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Ben Seri6300c8b2017-09-09 23:15:59 +02004169 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004170 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004171 }
4172
	/* Got Conf Rsp PENDING from the remote side and assume we sent
	 * Conf Rsp PENDING in the code above.
	 */
4175 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03004176 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004177
4178 /* check compatibility */
4179
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004180 /* Send rsp for BR/EDR channel */
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004181 if (!chan->hs_hcon)
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004182 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4183 else
4184 chan->ident = cmd->ident;
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004185 }
4186
Linus Torvalds1da177e2005-04-16 15:20:36 -07004187unlock:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004188 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004189 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004190}
4191
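/* L2CAP_CONF_RSP handler.  A success result stores the negotiated RFC
 * values, a pending result triggers an EFS response or AMP logical link
 * creation, unacceptable parameters lead to a re-negotiation with a
 * fresh Configure Request, and any other result disconnects the channel.
 */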
Gustavo Padovan2d792812012-10-06 10:07:01 +01004192static inline int l2cap_config_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004193 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4194 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004195{
4196 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4197 u16 scid, flags, result;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004198 struct l2cap_chan *chan;
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004199 int len = cmd_len - sizeof(*rsp);
Mat Martineau3c588192012-04-11 10:48:42 -07004200 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004201
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004202 if (cmd_len < sizeof(*rsp))
4203 return -EPROTO;
4204
Linus Torvalds1da177e2005-04-16 15:20:36 -07004205 scid = __le16_to_cpu(rsp->scid);
4206 flags = __le16_to_cpu(rsp->flags);
4207 result = __le16_to_cpu(rsp->result);
4208
Andrei Emeltchenko61386cb2012-03-12 12:13:07 +02004209 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4210 result, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004211
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004212 chan = l2cap_get_chan_by_scid(conn, scid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004213 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004214 return 0;
4215
4216 switch (result) {
4217 case L2CAP_CONF_SUCCESS:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004218 l2cap_conf_rfc_get(chan, rsp->data, len);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004219 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004220 break;
4221
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004222 case L2CAP_CONF_PENDING:
4223 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4224
4225 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4226 char buf[64];
4227
4228 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Ben Seri6300c8b2017-09-09 23:15:59 +02004229 buf, sizeof(buf), &result);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004230 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004231 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004232 goto done;
4233 }
4234
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004235 if (!chan->hs_hcon) {
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004236 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4237 0);
Andrei Emeltchenko5ce66b52012-10-31 15:46:30 +02004238 } else {
4239 if (l2cap_check_efs(chan)) {
4240 amp_create_logical_link(chan);
4241 chan->ident = cmd->ident;
4242 }
4243 }
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004244 }
4245 goto done;
4246
Linus Torvalds1da177e2005-04-16 15:20:36 -07004247 case L2CAP_CONF_UNACCEPT:
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004248 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004249 char req[64];
4250
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004251 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004252 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004253 goto done;
4254 }
4255
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004256 /* throw out any old stored conf requests */
4257 result = L2CAP_CONF_SUCCESS;
Gustavo F. Padovanb4450032011-04-12 18:15:09 -03004258 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Ben Seri6300c8b2017-09-09 23:15:59 +02004259 req, sizeof(req), &result);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004260 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004261 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004262 goto done;
4263 }
4264
4265 l2cap_send_cmd(conn, l2cap_get_ident(conn),
Gustavo Padovan2d792812012-10-06 10:07:01 +01004266 L2CAP_CONF_REQ, len, req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004267 chan->num_conf_req++;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004268 if (result != L2CAP_CONF_SUCCESS)
4269 goto done;
4270 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004271 }
4272
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004273 default:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004274 l2cap_chan_set_err(chan, ECONNRESET);
Andrei Emeltchenko2e0052e2012-02-21 12:54:58 +02004275
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08004276 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004277 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004278 goto done;
4279 }
4280
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004281 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004282 goto done;
4283
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004284 set_bit(CONF_INPUT_DONE, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004286 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004287 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004288
Mat Martineau105bdf92012-04-27 16:50:48 -07004289 if (chan->mode == L2CAP_MODE_ERTM ||
4290 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004291 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004292
Mat Martineau3c588192012-04-11 10:48:42 -07004293 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004294 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004295 else
4296 l2cap_chan_ready(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 }
4298
4299done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004300 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004301 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004302}
4303
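/* L2CAP_DISCONN_REQ handler: acknowledge with a Disconnection Response
 * echoing the CIDs, then shut down and free the channel.  An unknown
 * destination CID is answered with a Command Reject (invalid CID).
 */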
Gustavo Padovan2d792812012-10-06 10:07:01 +01004304static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004305 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4306 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004307{
4308 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4309 struct l2cap_disconn_rsp rsp;
4310 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004311 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004312
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004313 if (cmd_len != sizeof(*req))
4314 return -EPROTO;
4315
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316 scid = __le16_to_cpu(req->scid);
4317 dcid = __le16_to_cpu(req->dcid);
4318
4319 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4320
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004321 mutex_lock(&conn->chan_lock);
4322
4323 chan = __l2cap_get_chan_by_scid(conn, dcid);
4324 if (!chan) {
4325 mutex_unlock(&conn->chan_lock);
Johan Hedberg662d6522013-10-16 11:20:47 +03004326 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4327 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004328 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004329
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004330 l2cap_chan_lock(chan);
4331
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03004332 rsp.dcid = cpu_to_le16(chan->scid);
4333 rsp.scid = cpu_to_le16(chan->dcid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004334 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4335
Gustavo Padovan5ec1bbe2013-10-15 19:24:48 -03004336 chan->ops->set_shutdown(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337
Mat Martineau61d6ef32012-04-27 16:50:50 -07004338 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004339 l2cap_chan_del(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004340
4341 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004342
Gustavo Padovan80b98022012-05-27 22:27:51 -03004343 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004344 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004345
4346 mutex_unlock(&conn->chan_lock);
4347
Linus Torvalds1da177e2005-04-16 15:20:36 -07004348 return 0;
4349}
4350
Gustavo Padovan2d792812012-10-06 10:07:01 +01004351static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004352 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4353 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004354{
4355 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4356 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004357 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004358
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004359 if (cmd_len != sizeof(*rsp))
4360 return -EPROTO;
4361
Linus Torvalds1da177e2005-04-16 15:20:36 -07004362 scid = __le16_to_cpu(rsp->scid);
4363 dcid = __le16_to_cpu(rsp->dcid);
4364
4365 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4366
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004367 mutex_lock(&conn->chan_lock);
4368
4369 chan = __l2cap_get_chan_by_scid(conn, scid);
4370 if (!chan) {
4371 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004372 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004373 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004375 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004376
Matias Karhumaa0a5e8c12019-05-21 13:07:22 +03004377 if (chan->state != BT_DISCONN) {
4378 l2cap_chan_unlock(chan);
4379 mutex_unlock(&conn->chan_lock);
4380 return 0;
4381 }
4382
Mat Martineau61d6ef32012-04-27 16:50:50 -07004383 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004384 l2cap_chan_del(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004385
4386 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004387
Gustavo Padovan80b98022012-05-27 22:27:51 -03004388 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004389 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004390
4391 mutex_unlock(&conn->chan_lock);
4392
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393 return 0;
4394}
4395
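/* L2CAP_INFO_REQ handler: answer feature-mask and fixed-channel queries
 * from local state; any other type gets a "not supported" result.
 */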
Gustavo Padovan2d792812012-10-06 10:07:01 +01004396static inline int l2cap_information_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004397 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4398 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004399{
4400 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004401 u16 type;
4402
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004403 if (cmd_len != sizeof(*req))
4404 return -EPROTO;
4405
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406 type = __le16_to_cpu(req->type);
4407
4408 BT_DBG("type 0x%4.4x", type);
4409
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004410 if (type == L2CAP_IT_FEAT_MASK) {
4411 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004412 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004413 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004414 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4415 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004416 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004417 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Gustavo Padovan2d792812012-10-06 10:07:01 +01004418 | L2CAP_FEAT_FCS;
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02004419 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03004420 feat_mask |= L2CAP_FEAT_EXT_FLOW
Gustavo Padovan2d792812012-10-06 10:07:01 +01004421 | L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenkoa5fd6f32011-09-16 16:26:32 +03004422
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004423 put_unaligned_le32(feat_mask, rsp->data);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004424 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4425 buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004426 } else if (type == L2CAP_IT_FIXED_CHAN) {
4427 u8 buf[12];
4428 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Mat Martineau50a147c2011-11-02 16:18:34 -07004429
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004430 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4431 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02004432 rsp->data[0] = conn->local_fixed_chan;
4433 memset(rsp->data + 1, 0, 7);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004434 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4435 buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004436 } else {
4437 struct l2cap_info_rsp rsp;
4438 rsp.type = cpu_to_le16(type);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004439 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004440 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4441 &rsp);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004442 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004443
4444 return 0;
4445}
4446
Gustavo Padovan2d792812012-10-06 10:07:01 +01004447static inline int l2cap_information_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004448 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4449 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004450{
4451 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4452 u16 type, result;
4453
Jaganath Kanakkassery3f6fa3d2013-06-21 19:55:11 +05304454 if (cmd_len < sizeof(*rsp))
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004455 return -EPROTO;
4456
Linus Torvalds1da177e2005-04-16 15:20:36 -07004457 type = __le16_to_cpu(rsp->type);
4458 result = __le16_to_cpu(rsp->result);
4459
4460 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4461
	/* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4463 if (cmd->ident != conn->info_ident ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01004464 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004465 return 0;
4466
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02004467 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004468
Ville Tervoadb08ed2010-08-04 09:43:33 +03004469 if (result != L2CAP_IR_SUCCESS) {
4470 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4471 conn->info_ident = 0;
4472
4473 l2cap_conn_start(conn);
4474
4475 return 0;
4476 }
4477
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004478 switch (type) {
4479 case L2CAP_IT_FEAT_MASK:
Harvey Harrison83985312008-05-02 16:25:46 -07004480 conn->feat_mask = get_unaligned_le32(rsp->data);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004481
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07004482 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004483 struct l2cap_info_req req;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004484 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004485
4486 conn->info_ident = l2cap_get_ident(conn);
4487
4488 l2cap_send_cmd(conn, conn->info_ident,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004489 L2CAP_INFO_REQ, sizeof(req), &req);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004490 } else {
4491 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4492 conn->info_ident = 0;
4493
4494 l2cap_conn_start(conn);
4495 }
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004496 break;
4497
4498 case L2CAP_IT_FIXED_CHAN:
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02004499 conn->remote_fixed_chan = rsp->data[0];
Marcel Holtmann984947d2009-02-06 23:35:19 +01004500 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004501 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004502
4503 l2cap_conn_start(conn);
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004504 break;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004505 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004506
Linus Torvalds1da177e2005-04-16 15:20:36 -07004507 return 0;
4508}
4509
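/* AMP Create Channel Request handler.  A request for controller id 0 is
 * handled as a plain BR/EDR connect; otherwise the AMP controller id is
 * validated and the new channel is bound to the high-speed link before
 * configuration starts.
 */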
Mat Martineau17009152012-10-23 15:24:07 -07004510static int l2cap_create_channel_req(struct l2cap_conn *conn,
4511 struct l2cap_cmd_hdr *cmd,
4512 u16 cmd_len, void *data)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004513{
4514 struct l2cap_create_chan_req *req = data;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004515 struct l2cap_create_chan_rsp rsp;
Mat Martineau17009152012-10-23 15:24:07 -07004516 struct l2cap_chan *chan;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004517 struct hci_dev *hdev;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004518 u16 psm, scid;
4519
4520 if (cmd_len != sizeof(*req))
4521 return -EPROTO;
4522
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02004523 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004524 return -EINVAL;
4525
4526 psm = le16_to_cpu(req->psm);
4527 scid = le16_to_cpu(req->scid);
4528
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004529 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004530
	/* For controller id 0, make a BR/EDR connection */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004532 if (req->amp_id == AMP_ID_BREDR) {
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004533 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4534 req->amp_id);
4535 return 0;
4536 }
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004537
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004538 /* Validate AMP controller id */
4539 hdev = hci_dev_get(req->amp_id);
4540 if (!hdev)
4541 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004542
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004543 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
Mat Martineau17009152012-10-23 15:24:07 -07004544 hci_dev_put(hdev);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004545 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004546 }
4547
4548 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4549 req->amp_id);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004550 if (chan) {
4551 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4552 struct hci_conn *hs_hcon;
4553
Marcel Holtmann98e0f7e2013-10-13 02:23:40 -07004554 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4555 &conn->hcon->dst);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004556 if (!hs_hcon) {
4557 hci_dev_put(hdev);
Johan Hedberg662d6522013-10-16 11:20:47 +03004558 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4559 chan->dcid);
4560 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004561 }
4562
4563 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4564
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004565 mgr->bredr_chan = chan;
4566 chan->hs_hcon = hs_hcon;
Andrei Emeltchenkofd45bf42012-11-20 17:16:22 +02004567 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004568 conn->mtu = hdev->block_mtu;
4569 }
4570
4571 hci_dev_put(hdev);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004572
4573 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004574
4575error:
4576 rsp.dcid = 0;
4577 rsp.scid = cpu_to_le16(scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004578 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4579 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004580
4581 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4582 sizeof(rsp), &rsp);
4583
Johan Hedbergdc280802013-09-16 13:05:13 +03004584 return 0;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004585}
4586
Mat Martineau8eb200b2012-10-23 15:24:17 -07004587static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4588{
4589 struct l2cap_move_chan_req req;
4590 u8 ident;
4591
4592 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4593
4594 ident = l2cap_get_ident(chan->conn);
4595 chan->ident = ident;
4596
4597 req.icid = cpu_to_le16(chan->scid);
4598 req.dest_amp_id = dest_amp_id;
4599
4600 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4601 &req);
4602
4603 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4604}
4605
Mat Martineau1500109b2012-10-23 15:24:15 -07004606static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004607{
4608 struct l2cap_move_chan_rsp rsp;
4609
Mat Martineau1500109b2012-10-23 15:24:15 -07004610 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004611
Mat Martineau1500109b2012-10-23 15:24:15 -07004612 rsp.icid = cpu_to_le16(chan->dcid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004613 rsp.result = cpu_to_le16(result);
4614
Mat Martineau1500109b2012-10-23 15:24:15 -07004615 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4616 sizeof(rsp), &rsp);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004617}
4618
Mat Martineau5b155ef2012-10-23 15:24:14 -07004619static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004620{
4621 struct l2cap_move_chan_cfm cfm;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004622
Mat Martineau5b155ef2012-10-23 15:24:14 -07004623 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004624
Mat Martineau5b155ef2012-10-23 15:24:14 -07004625 chan->ident = l2cap_get_ident(chan->conn);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004626
Mat Martineau5b155ef2012-10-23 15:24:14 -07004627 cfm.icid = cpu_to_le16(chan->scid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004628 cfm.result = cpu_to_le16(result);
4629
Mat Martineau5b155ef2012-10-23 15:24:14 -07004630 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4631 sizeof(cfm), &cfm);
4632
4633 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4634}
4635
4636static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4637{
4638 struct l2cap_move_chan_cfm cfm;
4639
4640 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4641
4642 cfm.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004643 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
Mat Martineau5b155ef2012-10-23 15:24:14 -07004644
4645 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4646 sizeof(cfm), &cfm);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004647}
4648
4649static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004650 u16 icid)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004651{
4652 struct l2cap_move_chan_cfm_rsp rsp;
4653
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004654 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004655
4656 rsp.icid = cpu_to_le16(icid);
4657 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4658}
4659
Mat Martineau5f3847a2012-10-23 15:24:12 -07004660static void __release_logical_link(struct l2cap_chan *chan)
4661{
4662 chan->hs_hchan = NULL;
4663 chan->hs_hcon = NULL;
4664
4665 /* Placeholder - release the logical link */
4666}
4667
Mat Martineau1500109b2012-10-23 15:24:15 -07004668static void l2cap_logical_fail(struct l2cap_chan *chan)
4669{
4670 /* Logical link setup failed */
4671 if (chan->state != BT_CONNECTED) {
4672 /* Create channel failure, disconnect */
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004673 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineau1500109b2012-10-23 15:24:15 -07004674 return;
4675 }
4676
4677 switch (chan->move_role) {
4678 case L2CAP_MOVE_ROLE_RESPONDER:
4679 l2cap_move_done(chan);
4680 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4681 break;
4682 case L2CAP_MOVE_ROLE_INITIATOR:
4683 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4684 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4685 /* Remote has only sent pending or
4686 * success responses, clean up
4687 */
4688 l2cap_move_done(chan);
4689 }
4690
4691 /* Other amp move states imply that the move
4692 * has already aborted
4693 */
4694 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4695 break;
4696 }
4697}
4698
4699static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4700 struct hci_chan *hchan)
4701{
4702 struct l2cap_conf_rsp rsp;
Mat Martineau1500109b2012-10-23 15:24:15 -07004703
Andrei Emeltchenko336178a2012-10-31 15:46:27 +02004704 chan->hs_hchan = hchan;
Mat Martineau1500109b2012-10-23 15:24:15 -07004705 chan->hs_hcon->l2cap_data = chan->conn;
4706
Andrei Emeltchenko35ba9562012-10-25 15:20:43 +03004707 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
Mat Martineau1500109b2012-10-23 15:24:15 -07004708
4709 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Andrei Emeltchenkofe79c6f2012-10-25 15:20:42 +03004710 int err;
Mat Martineau1500109b2012-10-23 15:24:15 -07004711
4712 set_default_fcs(chan);
4713
4714 err = l2cap_ertm_init(chan);
4715 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004716 l2cap_send_disconn_req(chan, -err);
Mat Martineau1500109b2012-10-23 15:24:15 -07004717 else
4718 l2cap_chan_ready(chan);
4719 }
4720}
4721
4722static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4723 struct hci_chan *hchan)
4724{
4725 chan->hs_hcon = hchan->conn;
4726 chan->hs_hcon->l2cap_data = chan->conn;
4727
4728 BT_DBG("move_state %d", chan->move_state);
4729
4730 switch (chan->move_state) {
4731 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4732 /* Move confirm will be sent after a success
4733 * response is received
4734 */
4735 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4736 break;
4737 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4738 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4739 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4740 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4741 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4742 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4743 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4744 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4745 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4746 }
4747 break;
4748 default:
4749 /* Move was not in expected state, free the channel */
4750 __release_logical_link(chan);
4751
4752 chan->move_state = L2CAP_MOVE_STABLE;
4753 }
4754}
4755
4756/* Call with chan locked */
Andrei Emeltchenko27695fb2012-10-25 15:20:45 +03004757void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4758 u8 status)
Mat Martineau5b155ef2012-10-23 15:24:14 -07004759{
Mat Martineau1500109b2012-10-23 15:24:15 -07004760 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4761
4762 if (status) {
4763 l2cap_logical_fail(chan);
4764 __release_logical_link(chan);
4765 return;
4766 }
4767
4768 if (chan->state != BT_CONNECTED) {
4769 /* Ignore logical link if channel is on BR/EDR */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004770 if (chan->local_amp_id != AMP_ID_BREDR)
Mat Martineau1500109b2012-10-23 15:24:15 -07004771 l2cap_logical_finish_create(chan, hchan);
4772 } else {
4773 l2cap_logical_finish_move(chan, hchan);
4774 }
Mat Martineau5b155ef2012-10-23 15:24:14 -07004775}
4776
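/* Start an AMP channel move in the initiator role.  When the channel is
 * currently on BR/EDR, a physical link to the AMP controller has to be
 * brought up first; when it already sits on an AMP controller, the move
 * back to BR/EDR (move_id 0) can be requested immediately.
 */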
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004777void l2cap_move_start(struct l2cap_chan *chan)
4778{
4779 BT_DBG("chan %p", chan);
4780
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004781 if (chan->local_amp_id == AMP_ID_BREDR) {
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004782 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4783 return;
4784 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4785 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4786 /* Placeholder - start physical link setup */
4787 } else {
4788 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4789 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4790 chan->move_id = 0;
4791 l2cap_move_setup(chan);
4792 l2cap_send_move_chan_req(chan, 0);
4793 }
4794}
4795
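/* Finish channel creation once the outcome of the AMP physical link
 * setup is known.  For an outgoing channel this either continues on the
 * chosen controller or falls back to a BR/EDR Connection Request; for an
 * incoming channel the pending Create Channel Response is finally sent.
 */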
Mat Martineau8eb200b2012-10-23 15:24:17 -07004796static void l2cap_do_create(struct l2cap_chan *chan, int result,
4797 u8 local_amp_id, u8 remote_amp_id)
4798{
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004799 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4800 local_amp_id, remote_amp_id);
4801
Andrei Emeltchenko12d6cc62012-11-14 17:39:31 +02004802 chan->fcs = L2CAP_FCS_NONE;
4803
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004804 /* Outgoing channel on AMP */
4805 if (chan->state == BT_CONNECT) {
4806 if (result == L2CAP_CR_SUCCESS) {
4807 chan->local_amp_id = local_amp_id;
4808 l2cap_send_create_chan_req(chan, remote_amp_id);
4809 } else {
4810 /* Revert to BR/EDR connect */
4811 l2cap_send_conn_req(chan);
4812 }
4813
4814 return;
4815 }
4816
4817 /* Incoming channel on AMP */
4818 if (__l2cap_no_conn_pending(chan)) {
Mat Martineau8eb200b2012-10-23 15:24:17 -07004819 struct l2cap_conn_rsp rsp;
4820 char buf[128];
4821 rsp.scid = cpu_to_le16(chan->dcid);
4822 rsp.dcid = cpu_to_le16(chan->scid);
4823
Mat Martineau8eb200b2012-10-23 15:24:17 -07004824 if (result == L2CAP_CR_SUCCESS) {
4825 /* Send successful response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004826 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4827 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004828 } else {
4829 /* Send negative response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004830 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4831 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004832 }
4833
4834 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4835 sizeof(rsp), &rsp);
4836
4837 if (result == L2CAP_CR_SUCCESS) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02004838 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004839 set_bit(CONF_REQ_SENT, &chan->conf_state);
4840 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4841 L2CAP_CONF_REQ,
Ben Seri6300c8b2017-09-09 23:15:59 +02004842 l2cap_build_conf_req(chan, buf, sizeof(buf)), buf);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004843 chan->num_conf_req++;
4844 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004845 }
4846}
4847
4848static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4849 u8 remote_amp_id)
4850{
4851 l2cap_move_setup(chan);
4852 chan->move_id = local_amp_id;
4853 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4854
4855 l2cap_send_move_chan_req(chan, remote_amp_id);
4856}
4857
4858static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4859{
4860 struct hci_chan *hchan = NULL;
4861
4862 /* Placeholder - get hci_chan for logical link */
4863
4864 if (hchan) {
4865 if (hchan->state == BT_CONNECTED) {
4866 /* Logical link is ready to go */
4867 chan->hs_hcon = hchan->conn;
4868 chan->hs_hcon->l2cap_data = chan->conn;
4869 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4870 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4871
4872 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4873 } else {
4874 /* Wait for logical link to be ready */
4875 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4876 }
4877 } else {
4878 /* Logical link not available */
4879 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4880 }
4881}
4882
4883static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4884{
4885 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4886 u8 rsp_result;
4887 if (result == -EINVAL)
4888 rsp_result = L2CAP_MR_BAD_ID;
4889 else
4890 rsp_result = L2CAP_MR_NOT_ALLOWED;
4891
4892 l2cap_send_move_chan_rsp(chan, rsp_result);
4893 }
4894
4895 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4896 chan->move_state = L2CAP_MOVE_STABLE;
4897
4898 /* Restart data transmission */
4899 l2cap_ertm_send(chan);
4900}
4901
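/* Confirmation that physical link (AMP controller) setup has finished.
 * Depending on channel state and move role this continues channel
 * creation over the AMP, initiates or answers a channel move, or
 * cancels a move that can no longer proceed.
 */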
Andrei Emeltchenkoa514b172012-11-14 17:39:30 +02004902/* Invoke with locked chan */
4903void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
Mat Martineau8eb200b2012-10-23 15:24:17 -07004904{
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004905 u8 local_amp_id = chan->local_amp_id;
Andrei Emeltchenkofffadc02012-11-01 15:37:03 +02004906 u8 remote_amp_id = chan->remote_amp_id;
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004907
Mat Martineau8eb200b2012-10-23 15:24:17 -07004908 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4909 chan, result, local_amp_id, remote_amp_id);
4910
Dan Carpenter641aec92019-11-19 09:17:05 +03004911 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED)
Mat Martineau8eb200b2012-10-23 15:24:17 -07004912 return;
Mat Martineau8eb200b2012-10-23 15:24:17 -07004913
4914 if (chan->state != BT_CONNECTED) {
4915 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4916 } else if (result != L2CAP_MR_SUCCESS) {
4917 l2cap_do_move_cancel(chan, result);
4918 } else {
4919 switch (chan->move_role) {
4920 case L2CAP_MOVE_ROLE_INITIATOR:
4921 l2cap_do_move_initiate(chan, local_amp_id,
4922 remote_amp_id);
4923 break;
4924 case L2CAP_MOVE_ROLE_RESPONDER:
4925 l2cap_do_move_respond(chan, result);
4926 break;
4927 default:
4928 l2cap_do_move_cancel(chan, result);
4929 break;
4930 }
4931 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004932}
4933
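/* Handle an incoming Move Channel Request. The move is refused for
 * non-dynamic channels, BR/EDR-only channel policy, and modes other
 * than ERTM or streaming. A move collision is resolved in favour of
 * the side with the larger bd_addr; the losing side replies with
 * L2CAP_MR_COLLISION.
 */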
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004934static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004935 struct l2cap_cmd_hdr *cmd,
4936 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004937{
4938 struct l2cap_move_chan_req *req = data;
Mat Martineau1500109b2012-10-23 15:24:15 -07004939 struct l2cap_move_chan_rsp rsp;
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004940 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004941 u16 icid = 0;
4942 u16 result = L2CAP_MR_NOT_ALLOWED;
4943
4944 if (cmd_len != sizeof(*req))
4945 return -EPROTO;
4946
4947 icid = le16_to_cpu(req->icid);
4948
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004949 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004950
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02004951 if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004952 return -EINVAL;
4953
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004954 chan = l2cap_get_chan_by_dcid(conn, icid);
4955 if (!chan) {
Mat Martineau1500109b2012-10-23 15:24:15 -07004956 rsp.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004957 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
Mat Martineau1500109b2012-10-23 15:24:15 -07004958 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4959 sizeof(rsp), &rsp);
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004960 return 0;
4961 }
4962
Mat Martineau1500109b2012-10-23 15:24:15 -07004963 chan->ident = cmd->ident;
4964
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004965 if (chan->scid < L2CAP_CID_DYN_START ||
4966 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4967 (chan->mode != L2CAP_MODE_ERTM &&
4968 chan->mode != L2CAP_MODE_STREAMING)) {
4969 result = L2CAP_MR_NOT_ALLOWED;
4970 goto send_move_response;
4971 }
4972
4973 if (chan->local_amp_id == req->dest_amp_id) {
4974 result = L2CAP_MR_SAME_ID;
4975 goto send_move_response;
4976 }
4977
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004978 if (req->dest_amp_id != AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004979 struct hci_dev *hdev;
4980 hdev = hci_dev_get(req->dest_amp_id);
4981 if (!hdev || hdev->dev_type != HCI_AMP ||
4982 !test_bit(HCI_UP, &hdev->flags)) {
4983 if (hdev)
4984 hci_dev_put(hdev);
4985
4986 result = L2CAP_MR_BAD_ID;
4987 goto send_move_response;
4988 }
4989 hci_dev_put(hdev);
4990 }
4991
4992 /* Detect a move collision. Only send a collision response
4993 * if this side has "lost", otherwise proceed with the move.
4994 * The winner has the larger bd_addr.
4995 */
4996 if ((__chan_is_moving(chan) ||
4997 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
Marcel Holtmann6f59b902013-10-13 05:24:01 -07004998 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004999 result = L2CAP_MR_COLLISION;
5000 goto send_move_response;
5001 }
5002
Mat Martineau02b0fbb2012-10-23 15:24:10 -07005003 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5004 l2cap_move_setup(chan);
5005 chan->move_id = req->dest_amp_id;
5006 icid = chan->dcid;
5007
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005008 if (req->dest_amp_id == AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07005009 /* Moving to BR/EDR */
5010 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5011 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5012 result = L2CAP_MR_PEND;
5013 } else {
5014 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
5015 result = L2CAP_MR_SUCCESS;
5016 }
5017 } else {
5018 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
5019 /* Placeholder - uncomment when amp functions are available */
5020 /*amp_accept_physical(chan, req->dest_amp_id);*/
5021 result = L2CAP_MR_PEND;
5022 }
5023
5024send_move_response:
Mat Martineau1500109b2012-10-23 15:24:15 -07005025 l2cap_send_move_chan_rsp(chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005026
Mat Martineau02b0fbb2012-10-23 15:24:10 -07005027 l2cap_chan_unlock(chan);
5028
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005029 return 0;
5030}
5031
Mat Martineau5b155ef2012-10-23 15:24:14 -07005032static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
5033{
5034 struct l2cap_chan *chan;
5035 struct hci_chan *hchan = NULL;
5036
5037 chan = l2cap_get_chan_by_scid(conn, icid);
5038 if (!chan) {
5039 l2cap_send_move_chan_cfm_icid(conn, icid);
5040 return;
5041 }
5042
5043 __clear_chan_timer(chan);
5044 if (result == L2CAP_MR_PEND)
5045 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
5046
5047 switch (chan->move_state) {
5048 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
5049 /* Move confirm will be sent when logical link
5050 * is complete.
5051 */
5052 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5053 break;
5054 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
5055 if (result == L2CAP_MR_PEND) {
5056 break;
5057 } else if (test_bit(CONN_LOCAL_BUSY,
5058 &chan->conn_state)) {
5059 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
5060 } else {
5061 /* Logical link is up or moving to BR/EDR,
5062 * proceed with move
5063 */
5064 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
5065 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5066 }
5067 break;
5068 case L2CAP_MOVE_WAIT_RSP:
5069 /* Moving to AMP */
5070 if (result == L2CAP_MR_SUCCESS) {
5071 /* Remote is ready, send confirm immediately
5072 * after logical link is ready
5073 */
5074 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
5075 } else {
5076 /* Both logical link and move success
5077 * are required to confirm
5078 */
5079 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
5080 }
5081
5082 /* Placeholder - get hci_chan for logical link */
5083 if (!hchan) {
5084 /* Logical link not available */
5085 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5086 break;
5087 }
5088
5089 /* If the logical link is not yet connected, do not
5090 * send confirmation.
5091 */
5092 if (hchan->state != BT_CONNECTED)
5093 break;
5094
5095 /* Logical link is already ready to go */
5096
5097 chan->hs_hcon = hchan->conn;
5098 chan->hs_hcon->l2cap_data = chan->conn;
5099
5100 if (result == L2CAP_MR_SUCCESS) {
5101 /* Can confirm now */
5102 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5103 } else {
5104 /* Now only need move success
5105 * to confirm
5106 */
5107 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5108 }
5109
5110 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5111 break;
5112 default:
5113 /* Any other amp move state means the move failed. */
5114 chan->move_id = chan->local_amp_id;
5115 l2cap_move_done(chan);
5116 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5117 }
5118
5119 l2cap_chan_unlock(chan);
5120}
5121
5122static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5123 u16 result)
5124{
5125 struct l2cap_chan *chan;
5126
5127 chan = l2cap_get_chan_by_ident(conn, ident);
5128 if (!chan) {
5129 /* Could not locate channel, icid is best guess */
5130 l2cap_send_move_chan_cfm_icid(conn, icid);
5131 return;
5132 }
5133
5134 __clear_chan_timer(chan);
5135
5136 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5137 if (result == L2CAP_MR_COLLISION) {
5138 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5139 } else {
5140 /* Cleanup - cancel move */
5141 chan->move_id = chan->local_amp_id;
5142 l2cap_move_done(chan);
5143 }
5144 }
5145
5146 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5147
5148 l2cap_chan_unlock(chan);
5149}
5150
5151static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5152 struct l2cap_cmd_hdr *cmd,
5153 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005154{
5155 struct l2cap_move_chan_rsp *rsp = data;
5156 u16 icid, result;
5157
5158 if (cmd_len != sizeof(*rsp))
5159 return -EPROTO;
5160
5161 icid = le16_to_cpu(rsp->icid);
5162 result = le16_to_cpu(rsp->result);
5163
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005164 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005165
Mat Martineau5b155ef2012-10-23 15:24:14 -07005166 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5167 l2cap_move_continue(conn, icid, result);
5168 else
5169 l2cap_move_fail(conn, cmd->ident, icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005170
5171 return 0;
5172}
5173
Mat Martineau5f3847a2012-10-23 15:24:12 -07005174static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5175 struct l2cap_cmd_hdr *cmd,
5176 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005177{
5178 struct l2cap_move_chan_cfm *cfm = data;
Mat Martineau5f3847a2012-10-23 15:24:12 -07005179 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005180 u16 icid, result;
5181
5182 if (cmd_len != sizeof(*cfm))
5183 return -EPROTO;
5184
5185 icid = le16_to_cpu(cfm->icid);
5186 result = le16_to_cpu(cfm->result);
5187
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005188 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005189
Mat Martineau5f3847a2012-10-23 15:24:12 -07005190 chan = l2cap_get_chan_by_dcid(conn, icid);
5191 if (!chan) {
5192 /* Spec requires a response even if the icid was not found */
5193 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5194 return 0;
5195 }
5196
5197 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5198 if (result == L2CAP_MC_CONFIRMED) {
5199 chan->local_amp_id = chan->move_id;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005200 if (chan->local_amp_id == AMP_ID_BREDR)
Mat Martineau5f3847a2012-10-23 15:24:12 -07005201 __release_logical_link(chan);
5202 } else {
5203 chan->move_id = chan->local_amp_id;
5204 }
5205
5206 l2cap_move_done(chan);
5207 }
5208
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005209 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5210
Mat Martineau5f3847a2012-10-23 15:24:12 -07005211 l2cap_chan_unlock(chan);
5212
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005213 return 0;
5214}
5215
5216static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005217 struct l2cap_cmd_hdr *cmd,
5218 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005219{
5220 struct l2cap_move_chan_cfm_rsp *rsp = data;
Mat Martineau3fd71a02012-10-23 15:24:16 -07005221 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005222 u16 icid;
5223
5224 if (cmd_len != sizeof(*rsp))
5225 return -EPROTO;
5226
5227 icid = le16_to_cpu(rsp->icid);
5228
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005229 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005230
Mat Martineau3fd71a02012-10-23 15:24:16 -07005231 chan = l2cap_get_chan_by_scid(conn, icid);
5232 if (!chan)
5233 return 0;
5234
5235 __clear_chan_timer(chan);
5236
5237 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5238 chan->local_amp_id = chan->move_id;
5239
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005240 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
Mat Martineau3fd71a02012-10-23 15:24:16 -07005241 __release_logical_link(chan);
5242
5243 l2cap_move_done(chan);
5244 }
5245
5246 l2cap_chan_unlock(chan);
5247
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005248 return 0;
5249}
5250
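/* Handle a Connection Parameter Update Request on an LE link. The
 * request is only valid when the local device is master; the requested
 * values are checked with hci_check_conn_params() and, when accepted,
 * the controller is asked to update the connection and the new
 * parameters are reported through mgmt.
 */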
Claudio Takahaside731152011-02-11 19:28:55 -02005251static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005252 struct l2cap_cmd_hdr *cmd,
Johan Hedberg203e6392013-05-15 10:07:15 +03005253 u16 cmd_len, u8 *data)
Claudio Takahaside731152011-02-11 19:28:55 -02005254{
5255 struct hci_conn *hcon = conn->hcon;
5256 struct l2cap_conn_param_update_req *req;
5257 struct l2cap_conn_param_update_rsp rsp;
Johan Hedberg203e6392013-05-15 10:07:15 +03005258 u16 min, max, latency, to_multiplier;
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005259 int err;
Claudio Takahaside731152011-02-11 19:28:55 -02005260
Johan Hedberg40bef302014-07-16 11:42:27 +03005261 if (hcon->role != HCI_ROLE_MASTER)
Claudio Takahaside731152011-02-11 19:28:55 -02005262 return -EINVAL;
5263
Claudio Takahaside731152011-02-11 19:28:55 -02005264 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5265 return -EPROTO;
5266
5267 req = (struct l2cap_conn_param_update_req *) data;
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005268 min = __le16_to_cpu(req->min);
5269 max = __le16_to_cpu(req->max);
Claudio Takahaside731152011-02-11 19:28:55 -02005270 latency = __le16_to_cpu(req->latency);
5271 to_multiplier = __le16_to_cpu(req->to_multiplier);
5272
5273 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01005274 min, max, latency, to_multiplier);
Claudio Takahaside731152011-02-11 19:28:55 -02005275
5276 memset(&rsp, 0, sizeof(rsp));
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005277
Marcel Holtmann6b48ef12019-09-04 20:13:08 +02005278 err = hci_check_conn_params(min, max, latency, to_multiplier);
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005279 if (err)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005280 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005281 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005282 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005283
5284 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005285 sizeof(rsp), &rsp);
Claudio Takahaside731152011-02-11 19:28:55 -02005286
Andre Guedesffb5a8272014-07-01 18:10:11 -03005287 if (!err) {
Johan Hedbergf4869e22014-07-02 17:37:32 +03005288 u8 store_hint;
Andre Guedesffb5a8272014-07-01 18:10:11 -03005289
Johan Hedbergf4869e22014-07-02 17:37:32 +03005290 store_hint = hci_le_conn_update(hcon, min, max, latency,
5291 to_multiplier);
5292 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5293 store_hint, min, max, latency,
5294 to_multiplier);
5295
Andre Guedesffb5a8272014-07-01 18:10:11 -03005296 }
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005297
Claudio Takahaside731152011-02-11 19:28:55 -02005298 return 0;
5299}
5300
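/* Handle the response to an LE credit based connection request. On
 * success the remote CID, MTU, MPS and initial credits are recorded
 * and the channel becomes ready. An authentication or encryption
 * failure raises the security level and retries (unless MITM
 * protection is already in place); any other result closes the
 * channel.
 */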
Johan Hedbergf1496de2013-05-13 14:15:56 +03005301static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5302 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5303 u8 *data)
5304{
5305 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
Johan Hedberg3e64b7b2014-11-13 10:55:19 +02005306 struct hci_conn *hcon = conn->hcon;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005307 u16 dcid, mtu, mps, credits, result;
5308 struct l2cap_chan *chan;
Johan Hedberg3e64b7b2014-11-13 10:55:19 +02005309 int err, sec_level;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005310
5311 if (cmd_len < sizeof(*rsp))
5312 return -EPROTO;
5313
5314 dcid = __le16_to_cpu(rsp->dcid);
5315 mtu = __le16_to_cpu(rsp->mtu);
5316 mps = __le16_to_cpu(rsp->mps);
5317 credits = __le16_to_cpu(rsp->credits);
5318 result = __le16_to_cpu(rsp->result);
5319
Johan Hedberg40624182015-11-02 14:39:17 +02005320 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23 ||
5321 dcid < L2CAP_CID_DYN_START ||
5322 dcid > L2CAP_CID_LE_DYN_END))
Johan Hedbergf1496de2013-05-13 14:15:56 +03005323 return -EPROTO;
5324
5325 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5326 dcid, mtu, mps, credits, result);
5327
5328 mutex_lock(&conn->chan_lock);
5329
5330 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5331 if (!chan) {
5332 err = -EBADSLT;
5333 goto unlock;
5334 }
5335
5336 err = 0;
5337
5338 l2cap_chan_lock(chan);
5339
5340 switch (result) {
5341 case L2CAP_CR_SUCCESS:
Johan Hedberg40624182015-11-02 14:39:17 +02005342 if (__l2cap_get_chan_by_dcid(conn, dcid)) {
5343 err = -EBADSLT;
5344 break;
5345 }
5346
Johan Hedbergf1496de2013-05-13 14:15:56 +03005347 chan->ident = 0;
5348 chan->dcid = dcid;
5349 chan->omtu = mtu;
5350 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005351 chan->tx_credits = credits;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005352 l2cap_chan_ready(chan);
5353 break;
5354
Johan Hedberg3e64b7b2014-11-13 10:55:19 +02005355 case L2CAP_CR_AUTHENTICATION:
5356 case L2CAP_CR_ENCRYPTION:
5357 /* If we already have MITM protection we can't do
5358 * anything.
5359 */
5360 if (hcon->sec_level > BT_SECURITY_MEDIUM) {
5361 l2cap_chan_del(chan, ECONNREFUSED);
5362 break;
5363 }
5364
5365 sec_level = hcon->sec_level + 1;
5366 if (chan->sec_level < sec_level)
5367 chan->sec_level = sec_level;
5368
5369 /* We'll need to send a new Connect Request */
5370 clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);
5371
5372 smp_conn_security(hcon, chan->sec_level);
5373 break;
5374
Johan Hedbergf1496de2013-05-13 14:15:56 +03005375 default:
5376 l2cap_chan_del(chan, ECONNREFUSED);
5377 break;
5378 }
5379
5380 l2cap_chan_unlock(chan);
5381
5382unlock:
5383 mutex_unlock(&conn->chan_lock);
5384
5385 return err;
5386}
5387
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005388static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005389 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5390 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005391{
5392 int err = 0;
5393
5394 switch (cmd->code) {
5395 case L2CAP_COMMAND_REJ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005396 l2cap_command_rej(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005397 break;
5398
5399 case L2CAP_CONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005400 err = l2cap_connect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005401 break;
5402
5403 case L2CAP_CONN_RSP:
Mat Martineauf5a25982012-10-11 17:48:21 +03005404 case L2CAP_CREATE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005405 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005406 break;
5407
5408 case L2CAP_CONF_REQ:
5409 err = l2cap_config_req(conn, cmd, cmd_len, data);
5410 break;
5411
5412 case L2CAP_CONF_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005413 l2cap_config_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005414 break;
5415
5416 case L2CAP_DISCONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005417 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005418 break;
5419
5420 case L2CAP_DISCONN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005421 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005422 break;
5423
5424 case L2CAP_ECHO_REQ:
5425 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5426 break;
5427
5428 case L2CAP_ECHO_RSP:
5429 break;
5430
5431 case L2CAP_INFO_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005432 err = l2cap_information_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005433 break;
5434
5435 case L2CAP_INFO_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005436 l2cap_information_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005437 break;
5438
Mat Martineauf94ff6f2011-11-02 16:18:32 -07005439 case L2CAP_CREATE_CHAN_REQ:
5440 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5441 break;
5442
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005443 case L2CAP_MOVE_CHAN_REQ:
5444 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5445 break;
5446
5447 case L2CAP_MOVE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005448 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005449 break;
5450
5451 case L2CAP_MOVE_CHAN_CFM:
5452 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5453 break;
5454
5455 case L2CAP_MOVE_CHAN_CFM_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005456 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005457 break;
5458
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005459 default:
5460 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5461 err = -EINVAL;
5462 break;
5463 }
5464
5465 return err;
5466}
5467
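/* Handle an LE credit based connection request. MTU and MPS must be
 * at least 23 octets and the source CID must lie in the LE dynamic
 * range. A listening channel for the PSM must exist and its security
 * level must already be satisfied before a new channel is created and
 * the peer's initial credits are recorded.
 */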
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005468static int l2cap_le_connect_req(struct l2cap_conn *conn,
5469 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5470 u8 *data)
5471{
5472 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5473 struct l2cap_le_conn_rsp rsp;
5474 struct l2cap_chan *chan, *pchan;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005475 u16 dcid, scid, credits, mtu, mps;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005476 __le16 psm;
5477 u8 result;
5478
5479 if (cmd_len != sizeof(*req))
5480 return -EPROTO;
5481
5482 scid = __le16_to_cpu(req->scid);
5483 mtu = __le16_to_cpu(req->mtu);
5484 mps = __le16_to_cpu(req->mps);
5485 psm = req->psm;
5486 dcid = 0;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005487 credits = 0;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005488
5489 if (mtu < 23 || mps < 23)
5490 return -EPROTO;
5491
5492 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5493 scid, mtu, mps);
5494
5495 /* Check if we have socket listening on psm */
5496 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5497 &conn->hcon->dst, LE_LINK);
5498 if (!pchan) {
5499 result = L2CAP_CR_BAD_PSM;
5500 chan = NULL;
5501 goto response;
5502 }
5503
5504 mutex_lock(&conn->chan_lock);
5505 l2cap_chan_lock(pchan);
5506
Johan Hedberg35dc6f82014-11-13 10:55:18 +02005507 if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
5508 SMP_ALLOW_STK)) {
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005509 result = L2CAP_CR_AUTHENTICATION;
5510 chan = NULL;
5511 goto response_unlock;
5512 }
5513
Johan Hedberg8a7889c2015-11-02 14:39:15 +02005514 /* Check for valid dynamic CID range */
5515 if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
5516 result = L2CAP_CR_INVALID_SCID;
5517 chan = NULL;
5518 goto response_unlock;
5519 }
5520
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005521 /* Check if we already have channel with that dcid */
5522 if (__l2cap_get_chan_by_dcid(conn, scid)) {
Johan Hedberg8a7889c2015-11-02 14:39:15 +02005523 result = L2CAP_CR_SCID_IN_USE;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005524 chan = NULL;
5525 goto response_unlock;
5526 }
5527
5528 chan = pchan->ops->new_connection(pchan);
5529 if (!chan) {
5530 result = L2CAP_CR_NO_MEM;
5531 goto response_unlock;
5532 }
5533
Johan Hedberg0ce43ce2013-12-05 14:55:33 +02005534 l2cap_le_flowctl_init(chan);
5535
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005536 bacpy(&chan->src, &conn->hcon->src);
5537 bacpy(&chan->dst, &conn->hcon->dst);
Johan Hedberga250e042015-01-15 13:06:44 +02005538 chan->src_type = bdaddr_src_type(conn->hcon);
5539 chan->dst_type = bdaddr_dst_type(conn->hcon);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005540 chan->psm = psm;
5541 chan->dcid = scid;
5542 chan->omtu = mtu;
5543 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005544 chan->tx_credits = __le16_to_cpu(req->credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005545
5546 __l2cap_chan_add(conn, chan);
5547 dcid = chan->scid;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005548 credits = chan->rx_credits;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005549
5550 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5551
5552 chan->ident = cmd->ident;
5553
5554 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5555 l2cap_state_change(chan, BT_CONNECT2);
Johan Hedberg434714d2014-09-01 09:45:03 +03005556 /* The following result value is actually not defined
5557 * for LE CoC but we use it to let the function know
5558 * that it should bail out after doing its cleanup
5559 * instead of sending a response.
5560 */
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005561 result = L2CAP_CR_PEND;
5562 chan->ops->defer(chan);
5563 } else {
5564 l2cap_chan_ready(chan);
5565 result = L2CAP_CR_SUCCESS;
5566 }
5567
5568response_unlock:
5569 l2cap_chan_unlock(pchan);
5570 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03005571 l2cap_chan_put(pchan);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005572
5573 if (result == L2CAP_CR_PEND)
5574 return 0;
5575
5576response:
5577 if (chan) {
5578 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02005579 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005580 } else {
5581 rsp.mtu = 0;
5582 rsp.mps = 0;
5583 }
5584
5585 rsp.dcid = cpu_to_le16(dcid);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005586 rsp.credits = cpu_to_le16(credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005587 rsp.result = cpu_to_le16(result);
5588
5589 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5590
5591 return 0;
5592}
5593
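/* Handle an LE Flow Control Credit packet. The peer grants additional
 * transmit credits; a grant that would push the total above
 * LE_FLOWCTL_MAX_CREDITS is treated as a protocol violation and the
 * channel is disconnected. Otherwise queued frames are transmitted
 * while credits remain.
 */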
Johan Hedbergfad5fc892013-12-05 09:45:01 +02005594static inline int l2cap_le_credits(struct l2cap_conn *conn,
5595 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5596 u8 *data)
5597{
5598 struct l2cap_le_credits *pkt;
5599 struct l2cap_chan *chan;
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005600 u16 cid, credits, max_credits;
Johan Hedbergfad5fc892013-12-05 09:45:01 +02005601
5602 if (cmd_len != sizeof(*pkt))
5603 return -EPROTO;
5604
5605 pkt = (struct l2cap_le_credits *) data;
5606 cid = __le16_to_cpu(pkt->cid);
5607 credits = __le16_to_cpu(pkt->credits);
5608
5609 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5610
5611 chan = l2cap_get_chan_by_dcid(conn, cid);
5612 if (!chan)
5613 return -EBADSLT;
5614
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005615 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5616 if (credits > max_credits) {
5617 BT_ERR("LE credits overflow");
5618 l2cap_send_disconn_req(chan, ECONNRESET);
Martin Townsendee930532014-10-13 19:24:45 +01005619 l2cap_chan_unlock(chan);
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005620
5621 /* Return 0 so that we don't trigger an unnecessary
5622 * command reject packet.
5623 */
5624 return 0;
5625 }
5626
Johan Hedbergfad5fc892013-12-05 09:45:01 +02005627 chan->tx_credits += credits;
5628
5629 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5630 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5631 chan->tx_credits--;
5632 }
5633
5634 if (chan->tx_credits)
5635 chan->ops->resume(chan);
5636
5637 l2cap_chan_unlock(chan);
5638
5639 return 0;
5640}
5641
Johan Hedberg71fb4192013-12-10 10:52:48 +02005642static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5643 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5644 u8 *data)
5645{
5646 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5647 struct l2cap_chan *chan;
5648
5649 if (cmd_len < sizeof(*rej))
5650 return -EPROTO;
5651
5652 mutex_lock(&conn->chan_lock);
5653
5654 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5655 if (!chan)
5656 goto done;
5657
5658 l2cap_chan_lock(chan);
5659 l2cap_chan_del(chan, ECONNREFUSED);
5660 l2cap_chan_unlock(chan);
5661
5662done:
5663 mutex_unlock(&conn->chan_lock);
5664 return 0;
5665}
5666
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005667static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
Johan Hedberg203e6392013-05-15 10:07:15 +03005668 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5669 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005670{
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005671 int err = 0;
5672
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005673 switch (cmd->code) {
5674 case L2CAP_COMMAND_REJ:
Johan Hedberg71fb4192013-12-10 10:52:48 +02005675 l2cap_le_command_rej(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005676 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005677
5678 case L2CAP_CONN_PARAM_UPDATE_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005679 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5680 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005681
5682 case L2CAP_CONN_PARAM_UPDATE_RSP:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005683 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005684
Johan Hedbergf1496de2013-05-13 14:15:56 +03005685 case L2CAP_LE_CONN_RSP:
5686 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005687 break;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005688
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005689 case L2CAP_LE_CONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005690 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5691 break;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005692
Johan Hedbergfad5fc892013-12-05 09:45:01 +02005693 case L2CAP_LE_CREDITS:
5694 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5695 break;
5696
Johan Hedberg3defe012013-05-15 10:16:06 +03005697 case L2CAP_DISCONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005698 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5699 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005700
5701 case L2CAP_DISCONN_RSP:
5702 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005703 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005704
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005705 default:
5706 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005707 err = -EINVAL;
5708 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005709 }
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005710
5711 return err;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005712}
5713
Johan Hedbergc5623552013-04-29 19:35:33 +03005714static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5715 struct sk_buff *skb)
5716{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005717 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005718 struct l2cap_cmd_hdr *cmd;
5719 u16 len;
Johan Hedbergc5623552013-04-29 19:35:33 +03005720 int err;
5721
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005722 if (hcon->type != LE_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005723 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005724
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005725 if (skb->len < L2CAP_CMD_HDR_SIZE)
5726 goto drop;
Johan Hedbergc5623552013-04-29 19:35:33 +03005727
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005728 cmd = (void *) skb->data;
5729 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
Johan Hedbergc5623552013-04-29 19:35:33 +03005730
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005731 len = le16_to_cpu(cmd->len);
Johan Hedbergc5623552013-04-29 19:35:33 +03005732
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005733 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
Johan Hedbergc5623552013-04-29 19:35:33 +03005734
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005735 if (len != skb->len || !cmd->ident) {
5736 BT_DBG("corrupted command");
5737 goto drop;
5738 }
Johan Hedbergc5623552013-04-29 19:35:33 +03005739
Johan Hedberg203e6392013-05-15 10:07:15 +03005740 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005741 if (err) {
5742 struct l2cap_cmd_rej_unk rej;
Johan Hedbergc5623552013-04-29 19:35:33 +03005743
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005744 BT_ERR("Wrong link type (%d)", err);
Johan Hedbergc5623552013-04-29 19:35:33 +03005745
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005746 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005747 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5748 sizeof(rej), &rej);
Johan Hedbergc5623552013-04-29 19:35:33 +03005749 }
5750
Marcel Holtmann3b166292013-10-02 08:28:21 -07005751drop:
Johan Hedbergc5623552013-04-29 19:35:33 +03005752 kfree_skb(skb);
5753}
5754
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005755static inline void l2cap_sig_channel(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005756 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005757{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005758 struct hci_conn *hcon = conn->hcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005759 u8 *data = skb->data;
5760 int len = skb->len;
5761 struct l2cap_cmd_hdr cmd;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005762 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005763
5764 l2cap_raw_recv(conn, skb);
5765
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005766 if (hcon->type != ACL_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005767 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005768
Linus Torvalds1da177e2005-04-16 15:20:36 -07005769 while (len >= L2CAP_CMD_HDR_SIZE) {
Al Viro88219a02007-07-29 00:17:25 -07005770 u16 cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005771 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5772 data += L2CAP_CMD_HDR_SIZE;
5773 len -= L2CAP_CMD_HDR_SIZE;
5774
Al Viro88219a02007-07-29 00:17:25 -07005775 cmd_len = le16_to_cpu(cmd.len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005776
Gustavo Padovan2d792812012-10-06 10:07:01 +01005777 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5778 cmd.ident);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005779
Al Viro88219a02007-07-29 00:17:25 -07005780 if (cmd_len > len || !cmd.ident) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005781 BT_DBG("corrupted command");
5782 break;
5783 }
5784
Johan Hedbergc5623552013-04-29 19:35:33 +03005785 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005786 if (err) {
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03005787 struct l2cap_cmd_rej_unk rej;
Gustavo F. Padovan2c6d1a22011-03-23 14:38:32 -03005788
5789 BT_ERR("Wrong link type (%d)", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005790
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005791 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Gustavo Padovan2d792812012-10-06 10:07:01 +01005792 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5793 sizeof(rej), &rej);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005794 }
5795
Al Viro88219a02007-07-29 00:17:25 -07005796 data += cmd_len;
5797 len -= cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005798 }
5799
Marcel Holtmann3b166292013-10-02 08:28:21 -07005800drop:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005801 kfree_skb(skb);
5802}
5803
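/* Verify the Frame Check Sequence of a received frame. When the
 * channel uses CRC-16, the trailing two octets are compared against a
 * CRC computed over the frame header and payload; a mismatch returns
 * -EBADMSG.
 */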
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005804static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005805{
5806 u16 our_fcs, rcv_fcs;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03005807 int hdr_size;
5808
5809 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5810 hdr_size = L2CAP_EXT_HDR_SIZE;
5811 else
5812 hdr_size = L2CAP_ENH_HDR_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005813
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005814 if (chan->fcs == L2CAP_FCS_CRC16) {
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005815 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005816 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5817 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5818
5819 if (our_fcs != rcv_fcs)
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005820 return -EBADMSG;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005821 }
5822 return 0;
5823}
5824
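/* Send the final (F-bit) response to a poll: an RNR while the local
 * side is busy, otherwise any pending I-frames, followed by an RR
 * S-frame if none of the I-frames carried the F-bit.
 */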
Mat Martineau6ea00482012-05-17 20:53:52 -07005825static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005826{
Mat Martineaue31f7632012-05-17 20:53:41 -07005827 struct l2cap_ctrl control;
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005828
Mat Martineaue31f7632012-05-17 20:53:41 -07005829 BT_DBG("chan %p", chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005830
Mat Martineaue31f7632012-05-17 20:53:41 -07005831 memset(&control, 0, sizeof(control));
5832 control.sframe = 1;
5833 control.final = 1;
5834 control.reqseq = chan->buffer_seq;
5835 set_bit(CONN_SEND_FBIT, &chan->conn_state);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005836
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005837 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
Mat Martineaue31f7632012-05-17 20:53:41 -07005838 control.super = L2CAP_SUPER_RNR;
5839 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005840 }
5841
Mat Martineaue31f7632012-05-17 20:53:41 -07005842 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5843 chan->unacked_frames > 0)
5844 __set_retrans_timer(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005845
Mat Martineaue31f7632012-05-17 20:53:41 -07005846 /* Send pending iframes */
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03005847 l2cap_ertm_send(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005848
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005849 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
Mat Martineaue31f7632012-05-17 20:53:41 -07005850 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5851 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5852 * send it now.
5853 */
5854 control.super = L2CAP_SUPER_RR;
5855 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005856 }
5857}
5858
Gustavo Padovan2d792812012-10-06 10:07:01 +01005859static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5860 struct sk_buff **last_frag)
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005861{
Mat Martineau84084a32011-07-22 14:54:00 -07005862 /* skb->len reflects data in skb as well as all fragments
5863 * skb->data_len reflects only data in fragments
5864 */
5865 if (!skb_has_frag_list(skb))
5866 skb_shinfo(skb)->frag_list = new_frag;
5867
5868 new_frag->next = NULL;
5869
5870 (*last_frag)->next = new_frag;
5871 *last_frag = new_frag;
5872
5873 skb->len += new_frag->len;
5874 skb->data_len += new_frag->len;
5875 skb->truesize += new_frag->truesize;
5876}
5877
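/* Reassemble a segmented SDU according to the SAR bits of the control
 * field. Unsegmented frames are delivered directly; a start frame
 * carries the SDU length, which must not exceed the channel MTU;
 * continuation and end frames are appended to the fragment list until
 * the complete SDU has been received and can be delivered.
 */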
Mat Martineau4b51dae92012-05-17 20:53:37 -07005878static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5879 struct l2cap_ctrl *control)
Mat Martineau84084a32011-07-22 14:54:00 -07005880{
5881 int err = -EINVAL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005882
Mat Martineau4b51dae92012-05-17 20:53:37 -07005883 switch (control->sar) {
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005884 case L2CAP_SAR_UNSEGMENTED:
Mat Martineau84084a32011-07-22 14:54:00 -07005885 if (chan->sdu)
5886 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005887
Gustavo Padovan80b98022012-05-27 22:27:51 -03005888 err = chan->ops->recv(chan, skb);
Mat Martineau84084a32011-07-22 14:54:00 -07005889 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005890
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005891 case L2CAP_SAR_START:
Mat Martineau84084a32011-07-22 14:54:00 -07005892 if (chan->sdu)
5893 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005894
Daniel Borkmanndbb50882016-07-27 11:40:14 -07005895 if (!pskb_may_pull(skb, L2CAP_SDULEN_SIZE))
5896 break;
5897
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005898 chan->sdu_len = get_unaligned_le16(skb->data);
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005899 skb_pull(skb, L2CAP_SDULEN_SIZE);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005900
Mat Martineau84084a32011-07-22 14:54:00 -07005901 if (chan->sdu_len > chan->imtu) {
5902 err = -EMSGSIZE;
5903 break;
5904 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005905
Mat Martineau84084a32011-07-22 14:54:00 -07005906 if (skb->len >= chan->sdu_len)
5907 break;
5908
5909 chan->sdu = skb;
5910 chan->sdu_last_frag = skb;
5911
5912 skb = NULL;
5913 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005914 break;
5915
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005916 case L2CAP_SAR_CONTINUE:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005917 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005918 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005919
Mat Martineau84084a32011-07-22 14:54:00 -07005920 append_skb_frag(chan->sdu, skb,
5921 &chan->sdu_last_frag);
5922 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005923
Mat Martineau84084a32011-07-22 14:54:00 -07005924 if (chan->sdu->len >= chan->sdu_len)
5925 break;
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005926
Mat Martineau84084a32011-07-22 14:54:00 -07005927 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005928 break;
5929
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005930 case L2CAP_SAR_END:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005931 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005932 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005933
Mat Martineau84084a32011-07-22 14:54:00 -07005934 append_skb_frag(chan->sdu, skb,
5935 &chan->sdu_last_frag);
5936 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005937
Mat Martineau84084a32011-07-22 14:54:00 -07005938 if (chan->sdu->len != chan->sdu_len)
5939 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005940
Gustavo Padovan80b98022012-05-27 22:27:51 -03005941 err = chan->ops->recv(chan, chan->sdu);
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005942
Mat Martineau84084a32011-07-22 14:54:00 -07005943 if (!err) {
5944 /* Reassembly complete */
5945 chan->sdu = NULL;
5946 chan->sdu_last_frag = NULL;
5947 chan->sdu_len = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005948 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005949 break;
5950 }
5951
Mat Martineau84084a32011-07-22 14:54:00 -07005952 if (err) {
5953 kfree_skb(skb);
5954 kfree_skb(chan->sdu);
5955 chan->sdu = NULL;
5956 chan->sdu_last_frag = NULL;
5957 chan->sdu_len = 0;
5958 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005959
Mat Martineau84084a32011-07-22 14:54:00 -07005960 return err;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005961}
5962
Mat Martineau32b32732012-10-23 15:24:11 -07005963static int l2cap_resegment(struct l2cap_chan *chan)
5964{
5965 /* Placeholder */
5966 return 0;
5967}
5968
Mat Martineaue3281402011-07-07 09:39:02 -07005969void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
Gustavo F. Padovan712132e2010-06-21 19:39:50 -03005970{
Mat Martineau61aa4f52012-05-17 20:53:40 -07005971 u8 event;
5972
5973 if (chan->mode != L2CAP_MODE_ERTM)
5974 return;
5975
5976 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03005977 l2cap_tx(chan, NULL, NULL, event);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005978}
5979
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005980static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5981{
Mat Martineau63838722012-05-17 20:53:45 -07005982 int err = 0;
5983 /* Pass sequential frames to l2cap_reassemble_sdu()
5984 * until a gap is encountered.
5985 */
5986
5987 BT_DBG("chan %p", chan);
5988
5989 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5990 struct sk_buff *skb;
5991 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5992 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5993
5994 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5995
5996 if (!skb)
5997 break;
5998
5999 skb_unlink(skb, &chan->srej_q);
6000 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
Johan Hedberga4368ff2015-03-30 23:21:01 +03006001 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
Mat Martineau63838722012-05-17 20:53:45 -07006002 if (err)
6003 break;
6004 }
6005
6006 if (skb_queue_empty(&chan->srej_q)) {
6007 chan->rx_state = L2CAP_RX_STATE_RECV;
6008 l2cap_send_ack(chan);
6009 }
6010
6011 return err;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006012}
6013
6014static void l2cap_handle_srej(struct l2cap_chan *chan,
6015 struct l2cap_ctrl *control)
6016{
Mat Martineauf80842a2012-05-17 20:53:46 -07006017 struct sk_buff *skb;
6018
6019 BT_DBG("chan %p, control %p", chan, control);
6020
6021 if (control->reqseq == chan->next_tx_seq) {
6022 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006023 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07006024 return;
6025 }
6026
6027 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6028
6029 if (skb == NULL) {
6030 BT_DBG("Seq %d not available for retransmission",
6031 control->reqseq);
6032 return;
6033 }
6034
Johan Hedberga4368ff2015-03-30 23:21:01 +03006035 if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
Mat Martineauf80842a2012-05-17 20:53:46 -07006036 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006037 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07006038 return;
6039 }
6040
6041 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6042
6043 if (control->poll) {
6044 l2cap_pass_to_tx(chan, control);
6045
6046 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6047 l2cap_retransmit(chan, control);
6048 l2cap_ertm_send(chan);
6049
6050 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6051 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6052 chan->srej_save_reqseq = control->reqseq;
6053 }
6054 } else {
6055 l2cap_pass_to_tx_fbit(chan, control);
6056
6057 if (control->final) {
6058 if (chan->srej_save_reqseq != control->reqseq ||
6059 !test_and_clear_bit(CONN_SREJ_ACT,
6060 &chan->conn_state))
6061 l2cap_retransmit(chan, control);
6062 } else {
6063 l2cap_retransmit(chan, control);
6064 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
6065 set_bit(CONN_SREJ_ACT, &chan->conn_state);
6066 chan->srej_save_reqseq = control->reqseq;
6067 }
6068 }
6069 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006070}
6071
6072static void l2cap_handle_rej(struct l2cap_chan *chan,
6073 struct l2cap_ctrl *control)
6074{
Mat Martineaufcd289d2012-05-17 20:53:47 -07006075 struct sk_buff *skb;
6076
6077 BT_DBG("chan %p, control %p", chan, control);
6078
6079 if (control->reqseq == chan->next_tx_seq) {
6080 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006081 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07006082 return;
6083 }
6084
6085 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
6086
6087 if (chan->max_tx && skb &&
Johan Hedberga4368ff2015-03-30 23:21:01 +03006088 bt_cb(skb)->l2cap.retries >= chan->max_tx) {
Mat Martineaufcd289d2012-05-17 20:53:47 -07006089 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006090 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07006091 return;
6092 }
6093
6094 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6095
6096 l2cap_pass_to_tx(chan, control);
6097
6098 if (control->final) {
6099 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
6100 l2cap_retransmit_all(chan, control);
6101 } else {
6102 l2cap_retransmit_all(chan, control);
6103 l2cap_ertm_send(chan);
6104 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
6105 set_bit(CONN_REJ_ACT, &chan->conn_state);
6106 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006107}
6108
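/* Classify a received I-frame sequence number relative to the expected
 * sequence number and the transmit window. The result drives the RX
 * state machine: expected data, a duplicate, a gap to be recovered
 * with SREJ, or an invalid frame that is ignored or causes a
 * disconnect.
 */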
Mat Martineau4b51dae92012-05-17 20:53:37 -07006109static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
6110{
6111 BT_DBG("chan %p, txseq %d", chan, txseq);
6112
6113 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
6114 chan->expected_tx_seq);
6115
6116 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
6117 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
Gustavo Padovan2d792812012-10-06 10:07:01 +01006118 chan->tx_win) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07006119 /* See notes below regarding "double poll" and
6120 * invalid packets.
6121 */
6122 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6123 BT_DBG("Invalid/Ignore - after SREJ");
6124 return L2CAP_TXSEQ_INVALID_IGNORE;
6125 } else {
6126 BT_DBG("Invalid - in window after SREJ sent");
6127 return L2CAP_TXSEQ_INVALID;
6128 }
6129 }
6130
6131 if (chan->srej_list.head == txseq) {
6132 BT_DBG("Expected SREJ");
6133 return L2CAP_TXSEQ_EXPECTED_SREJ;
6134 }
6135
6136 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6137 BT_DBG("Duplicate SREJ - txseq already stored");
6138 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6139 }
6140
6141 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6142 BT_DBG("Unexpected SREJ - not requested");
6143 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6144 }
6145 }
6146
6147 if (chan->expected_tx_seq == txseq) {
6148 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6149 chan->tx_win) {
6150 BT_DBG("Invalid - txseq outside tx window");
6151 return L2CAP_TXSEQ_INVALID;
6152 } else {
6153 BT_DBG("Expected");
6154 return L2CAP_TXSEQ_EXPECTED;
6155 }
6156 }
6157
6158 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
Gustavo Padovan2d792812012-10-06 10:07:01 +01006159 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07006160 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6161 return L2CAP_TXSEQ_DUPLICATE;
6162 }
6163
6164 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6165 /* A source of invalid packets is a "double poll" condition,
6166 * where delays cause us to send multiple poll packets. If
6167 * the remote stack receives and processes both polls,
6168 * sequence numbers can wrap around in such a way that a
6169 * resent frame has a sequence number that looks like new data
6170 * with a sequence gap. This would trigger an erroneous SREJ
6171 * request.
6172 *
6173 * Fortunately, this is impossible with a tx window that's
6174 * less than half of the maximum sequence number, which allows
6175 * invalid frames to be safely ignored.
6176 *
6177 * With tx window sizes greater than half of the tx window
6178 * maximum, the frame is invalid and cannot be ignored. This
6179 * causes a disconnect.
6180 */
6181
6182 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6183 BT_DBG("Invalid/Ignore - txseq outside tx window");
6184 return L2CAP_TXSEQ_INVALID_IGNORE;
6185 } else {
6186 BT_DBG("Invalid - txseq outside tx window");
6187 return L2CAP_TXSEQ_INVALID;
6188 }
6189 } else {
6190 BT_DBG("Unexpected - txseq indicates missing frames");
6191 return L2CAP_TXSEQ_UNEXPECTED;
6192 }
6193}
6194
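/* RX state machine handler for the RECV state: deliver expected
 * I-frames, start SREJ recovery when a sequence gap is detected, and
 * react to RR, RNR, REJ and SREJ supervisory frames.
 */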
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006195static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6196 struct l2cap_ctrl *control,
6197 struct sk_buff *skb, u8 event)
6198{
6199 int err = 0;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006200 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006201
6202 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6203 event);
6204
6205 switch (event) {
6206 case L2CAP_EV_RECV_IFRAME:
6207 switch (l2cap_classify_txseq(chan, control->txseq)) {
6208 case L2CAP_TXSEQ_EXPECTED:
6209 l2cap_pass_to_tx(chan, control);
6210
6211 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6212 BT_DBG("Busy, discarding expected seq %d",
6213 control->txseq);
6214 break;
6215 }
6216
6217 chan->expected_tx_seq = __next_seq(chan,
6218 control->txseq);
6219
6220 chan->buffer_seq = chan->expected_tx_seq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006221 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006222
6223 err = l2cap_reassemble_sdu(chan, skb, control);
6224 if (err)
6225 break;
6226
6227 if (control->final) {
6228 if (!test_and_clear_bit(CONN_REJ_ACT,
6229 &chan->conn_state)) {
6230 control->final = 0;
6231 l2cap_retransmit_all(chan, control);
6232 l2cap_ertm_send(chan);
6233 }
6234 }
6235
6236 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6237 l2cap_send_ack(chan);
6238 break;
6239 case L2CAP_TXSEQ_UNEXPECTED:
6240 l2cap_pass_to_tx(chan, control);
6241
6242 /* Can't issue SREJ frames in the local busy state.
6243 * Drop this frame, it will be seen as missing
6244 * when local busy is exited.
6245 */
6246 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6247 BT_DBG("Busy, discarding unexpected seq %d",
6248 control->txseq);
6249 break;
6250 }
6251
6252 /* There was a gap in the sequence, so an SREJ
6253 * must be sent for each missing frame. The
6254 * current frame is stored for later use.
6255 */
6256 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006257 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006258 BT_DBG("Queued %p (queue len %d)", skb,
6259 skb_queue_len(&chan->srej_q));
6260
6261 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6262 l2cap_seq_list_clear(&chan->srej_list);
6263 l2cap_send_srej(chan, control->txseq);
6264
6265 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6266 break;
6267 case L2CAP_TXSEQ_DUPLICATE:
6268 l2cap_pass_to_tx(chan, control);
6269 break;
6270 case L2CAP_TXSEQ_INVALID_IGNORE:
6271 break;
6272 case L2CAP_TXSEQ_INVALID:
6273 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006274 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006275 break;
6276 }
6277 break;
6278 case L2CAP_EV_RECV_RR:
6279 l2cap_pass_to_tx(chan, control);
6280 if (control->final) {
6281 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6282
Mat Martineaue6a3ee62012-10-23 15:24:22 -07006283 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6284 !__chan_is_moving(chan)) {
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006285 control->final = 0;
6286 l2cap_retransmit_all(chan, control);
6287 }
6288
6289 l2cap_ertm_send(chan);
6290 } else if (control->poll) {
6291 l2cap_send_i_or_rr_or_rnr(chan);
6292 } else {
6293 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6294 &chan->conn_state) &&
6295 chan->unacked_frames)
6296 __set_retrans_timer(chan);
6297
6298 l2cap_ertm_send(chan);
6299 }
6300 break;
6301 case L2CAP_EV_RECV_RNR:
6302 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6303 l2cap_pass_to_tx(chan, control);
6304 if (control && control->poll) {
6305 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6306 l2cap_send_rr_or_rnr(chan, 0);
6307 }
6308 __clear_retrans_timer(chan);
6309 l2cap_seq_list_clear(&chan->retrans_list);
6310 break;
6311 case L2CAP_EV_RECV_REJ:
6312 l2cap_handle_rej(chan, control);
6313 break;
6314 case L2CAP_EV_RECV_SREJ:
6315 l2cap_handle_srej(chan, control);
6316 break;
6317 default:
6318 break;
6319 }
6320
6321 if (skb && !skb_in_use) {
6322 BT_DBG("Freeing %p", skb);
6323 kfree_skb(skb);
6324 }
6325
6326 return err;
6327}
6328
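/* ERTM receive state machine handler for the SREJ_SENT state, entered
 * after a sequence gap has been detected. Incoming I-frames are held
 * on srej_q until the SREJ'd frames arrive and the queue can be
 * reassembled in order.
 */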
6329static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6330 struct l2cap_ctrl *control,
6331 struct sk_buff *skb, u8 event)
6332{
6333 int err = 0;
6334 u16 txseq = control->txseq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006335 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006336
6337 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6338 event);
6339
6340 switch (event) {
6341 case L2CAP_EV_RECV_IFRAME:
6342 switch (l2cap_classify_txseq(chan, txseq)) {
6343 case L2CAP_TXSEQ_EXPECTED:
6344 /* Keep frame for reassembly later */
6345 l2cap_pass_to_tx(chan, control);
6346 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006347 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006348 BT_DBG("Queued %p (queue len %d)", skb,
6349 skb_queue_len(&chan->srej_q));
6350
6351 chan->expected_tx_seq = __next_seq(chan, txseq);
6352 break;
6353 case L2CAP_TXSEQ_EXPECTED_SREJ:
6354 l2cap_seq_list_pop(&chan->srej_list);
6355
6356 l2cap_pass_to_tx(chan, control);
6357 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006358 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006359 BT_DBG("Queued %p (queue len %d)", skb,
6360 skb_queue_len(&chan->srej_q));
6361
6362 err = l2cap_rx_queued_iframes(chan);
6363 if (err)
6364 break;
6365
6366 break;
6367 case L2CAP_TXSEQ_UNEXPECTED:
6368 /* Got a frame that can't be reassembled yet.
6369 * Save it for later, and send SREJs to cover
6370 * the missing frames.
6371 */
6372 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006373 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006374 BT_DBG("Queued %p (queue len %d)", skb,
6375 skb_queue_len(&chan->srej_q));
6376
6377 l2cap_pass_to_tx(chan, control);
6378 l2cap_send_srej(chan, control->txseq);
6379 break;
6380 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6381 /* This frame was requested with an SREJ, but
6382 * some expected retransmitted frames are
6383 * missing. Request retransmission of missing
6384 * SREJ'd frames.
6385 */
6386 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006387 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006388 BT_DBG("Queued %p (queue len %d)", skb,
6389 skb_queue_len(&chan->srej_q));
6390
6391 l2cap_pass_to_tx(chan, control);
6392 l2cap_send_srej_list(chan, control->txseq);
6393 break;
6394 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6395 /* We've already queued this frame. Drop this copy. */
6396 l2cap_pass_to_tx(chan, control);
6397 break;
6398 case L2CAP_TXSEQ_DUPLICATE:
6399 /* Expecting a later sequence number, so this frame
6400 * was already received. Ignore it completely.
6401 */
6402 break;
6403 case L2CAP_TXSEQ_INVALID_IGNORE:
6404 break;
6405 case L2CAP_TXSEQ_INVALID:
6406 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006407 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006408 break;
6409 }
6410 break;
6411 case L2CAP_EV_RECV_RR:
6412 l2cap_pass_to_tx(chan, control);
6413 if (control->final) {
6414 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6415
6416 if (!test_and_clear_bit(CONN_REJ_ACT,
6417 &chan->conn_state)) {
6418 control->final = 0;
6419 l2cap_retransmit_all(chan, control);
6420 }
6421
6422 l2cap_ertm_send(chan);
6423 } else if (control->poll) {
6424 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6425 &chan->conn_state) &&
6426 chan->unacked_frames) {
6427 __set_retrans_timer(chan);
6428 }
6429
6430 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6431 l2cap_send_srej_tail(chan);
6432 } else {
6433 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6434 &chan->conn_state) &&
6435 chan->unacked_frames)
6436 __set_retrans_timer(chan);
6437
6438 l2cap_send_ack(chan);
6439 }
6440 break;
6441 case L2CAP_EV_RECV_RNR:
6442 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6443 l2cap_pass_to_tx(chan, control);
6444 if (control->poll) {
6445 l2cap_send_srej_tail(chan);
6446 } else {
6447 struct l2cap_ctrl rr_control;
6448 memset(&rr_control, 0, sizeof(rr_control));
6449 rr_control.sframe = 1;
6450 rr_control.super = L2CAP_SUPER_RR;
6451 rr_control.reqseq = chan->buffer_seq;
6452 l2cap_send_sframe(chan, &rr_control);
6453 }
6454
6455 break;
6456 case L2CAP_EV_RECV_REJ:
6457 l2cap_handle_rej(chan, control);
6458 break;
6459 case L2CAP_EV_RECV_SREJ:
6460 l2cap_handle_srej(chan, control);
6461 break;
6462 }
6463
6464 if (skb && !skb_in_use) {
6465 BT_DBG("Freeing %p", skb);
6466 kfree_skb(skb);
6467 }
6468
6469 return err;
6470}
6471
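/* Complete a channel move: return to the RECV state, pick up the MTU
 * of the new link and resegment any pending outgoing data.
 */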
Mat Martineau32b32732012-10-23 15:24:11 -07006472static int l2cap_finish_move(struct l2cap_chan *chan)
6473{
6474 BT_DBG("chan %p", chan);
6475
6476 chan->rx_state = L2CAP_RX_STATE_RECV;
6477
6478 if (chan->hs_hcon)
6479 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6480 else
6481 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6482
6483 return l2cap_resegment(chan);
6484}
6485
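/* After a channel move, wait for a poll (P=1) frame from the remote
 * side before resuming normal ERTM operation on the new link.
 */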
6486static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6487 struct l2cap_ctrl *control,
6488 struct sk_buff *skb, u8 event)
6489{
6490 int err;
6491
6492 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6493 event);
6494
6495 if (!control->poll)
6496 return -EPROTO;
6497
6498 l2cap_process_reqseq(chan, control->reqseq);
6499
6500 if (!skb_queue_empty(&chan->tx_q))
6501 chan->tx_send_head = skb_peek(&chan->tx_q);
6502 else
6503 chan->tx_send_head = NULL;
6504
6505 /* Rewind next_tx_seq to the point expected
6506 * by the receiver.
6507 */
6508 chan->next_tx_seq = control->reqseq;
6509 chan->unacked_frames = 0;
6510
6511 err = l2cap_finish_move(chan);
6512 if (err)
6513 return err;
6514
6515 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6516 l2cap_send_i_or_rr_or_rnr(chan);
6517
6518 if (event == L2CAP_EV_RECV_IFRAME)
6519 return -EPROTO;
6520
6521 return l2cap_rx_state_recv(chan, control, NULL, event);
6522}
6523
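/* After a channel move, wait for a final (F=1) frame from the remote
 * side before resuming normal ERTM operation on the new link.
 */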
6524static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6525 struct l2cap_ctrl *control,
6526 struct sk_buff *skb, u8 event)
6527{
6528 int err;
6529
6530 if (!control->final)
6531 return -EPROTO;
6532
6533 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6534
6535 chan->rx_state = L2CAP_RX_STATE_RECV;
6536 l2cap_process_reqseq(chan, control->reqseq);
6537
6538 if (!skb_queue_empty(&chan->tx_q))
6539 chan->tx_send_head = skb_peek(&chan->tx_q);
6540 else
6541 chan->tx_send_head = NULL;
6542
6543 /* Rewind next_tx_seq to the point expected
6544 * by the receiver.
6545 */
6546 chan->next_tx_seq = control->reqseq;
6547 chan->unacked_frames = 0;
6548
6549 if (chan->hs_hcon)
6550 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6551 else
6552 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6553
6554 err = l2cap_resegment(chan);
6555
6556 if (!err)
6557 err = l2cap_rx_state_recv(chan, control, skb, event);
6558
6559 return err;
6560}
6561
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006562static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6563{
6564 /* Make sure reqseq is for a packet that has been sent but not acked */
6565 u16 unacked;
6566
6567 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6568 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6569}
6570
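/* Entry point of the ERTM receive state machine: validate reqseq and
 * hand the frame to the handler for the current rx_state.
 */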
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006571static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6572 struct sk_buff *skb, u8 event)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006573{
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006574 int err = 0;
6575
6576 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6577 control, skb, event, chan->rx_state);
6578
6579 if (__valid_reqseq(chan, control->reqseq)) {
6580 switch (chan->rx_state) {
6581 case L2CAP_RX_STATE_RECV:
6582 err = l2cap_rx_state_recv(chan, control, skb, event);
6583 break;
6584 case L2CAP_RX_STATE_SREJ_SENT:
6585 err = l2cap_rx_state_srej_sent(chan, control, skb,
6586 event);
6587 break;
Mat Martineau32b32732012-10-23 15:24:11 -07006588 case L2CAP_RX_STATE_WAIT_P:
6589 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6590 break;
6591 case L2CAP_RX_STATE_WAIT_F:
6592 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6593 break;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006594 default:
6595 /* shut it down */
6596 break;
6597 }
6598 } else {
 6599		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6600 control->reqseq, chan->next_tx_seq,
6601 chan->expected_ack_seq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006602 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006603 }
6604
6605 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006606}
6607
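/* Streaming mode receive: in-sequence frames are reassembled into
 * SDUs, anything else discards the partially assembled SDU since
 * streaming mode never retransmits.
 */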
6608static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6609 struct sk_buff *skb)
6610{
Mat Martineau4b51dae92012-05-17 20:53:37 -07006611 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6612 chan->rx_state);
6613
6614 if (l2cap_classify_txseq(chan, control->txseq) ==
6615 L2CAP_TXSEQ_EXPECTED) {
6616 l2cap_pass_to_tx(chan, control);
6617
6618 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6619 __next_seq(chan, chan->buffer_seq));
6620
6621 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6622
6623 l2cap_reassemble_sdu(chan, skb, control);
6624 } else {
6625 if (chan->sdu) {
6626 kfree_skb(chan->sdu);
6627 chan->sdu = NULL;
6628 }
6629 chan->sdu_last_frag = NULL;
6630 chan->sdu_len = 0;
6631
6632 if (skb) {
6633 BT_DBG("Freeing %p", skb);
6634 kfree_skb(skb);
6635 }
6636 }
6637
6638 chan->last_acked_seq = control->txseq;
6639 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6640
Prasanna Karthik9a544212015-11-19 12:05:35 +00006641 return 0;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006642}
6643
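/* Common receive path for ERTM and streaming mode: check the FCS,
 * validate the control field and dispatch I-frames and S-frames to
 * the appropriate receive routine.
 */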
6644static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6645{
Johan Hedberga4368ff2015-03-30 23:21:01 +03006646 struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006647 u16 len;
6648 u8 event;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006649
Mat Martineaub76bbd62012-04-11 10:48:43 -07006650 __unpack_control(chan, skb);
6651
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006652 len = skb->len;
6653
6654 /*
6655 * We can just drop the corrupted I-frame here.
6656 * Receiver will miss it and start proper recovery
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006657 * procedures and ask for retransmission.
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006658 */
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006659 if (l2cap_check_fcs(chan, skb))
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006660 goto drop;
6661
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006662 if (!control->sframe && control->sar == L2CAP_SAR_START)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006663 len -= L2CAP_SDULEN_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006664
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006665 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006666 len -= L2CAP_FCS_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006667
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006668 if (len > chan->mps) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006669 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006670 goto drop;
6671 }
6672
Daniel Borkmanndbb50882016-07-27 11:40:14 -07006673 if ((chan->mode == L2CAP_MODE_ERTM ||
6674 chan->mode == L2CAP_MODE_STREAMING) && sk_filter(chan->data, skb))
6675 goto drop;
6676
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006677 if (!control->sframe) {
6678 int err;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006679
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006680 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6681 control->sar, control->reqseq, control->final,
6682 control->txseq);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03006683
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006684 /* Validate F-bit - F=0 always valid, F=1 only
6685 * valid in TX WAIT_F
6686 */
6687 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006688 goto drop;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006689
6690 if (chan->mode != L2CAP_MODE_STREAMING) {
6691 event = L2CAP_EV_RECV_IFRAME;
6692 err = l2cap_rx(chan, control, skb, event);
6693 } else {
6694 err = l2cap_stream_rx(chan, control, skb);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006695 }
6696
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006697 if (err)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006698 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006699 } else {
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006700 const u8 rx_func_to_event[4] = {
6701 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6702 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6703 };
6704
6705 /* Only I-frames are expected in streaming mode */
6706 if (chan->mode == L2CAP_MODE_STREAMING)
6707 goto drop;
6708
6709 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6710 control->reqseq, control->final, control->poll,
6711 control->super);
6712
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006713 if (len != 0) {
Andrei Emeltchenko1bb166e2012-11-20 17:16:21 +02006714 BT_ERR("Trailing bytes: %d in sframe", len);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006715 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006716 goto drop;
6717 }
6718
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006719 /* Validate F and P bits */
6720 if (control->final && (control->poll ||
6721 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6722 goto drop;
6723
6724 event = rx_func_to_event[control->super];
6725 if (l2cap_rx(chan, control, skb, event))
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006726 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006727 }
6728
6729 return 0;
6730
6731drop:
6732 kfree_skb(skb);
6733 return 0;
6734}
6735
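/* Return LE flow control credits to the sender once our rx credit
 * count has dropped below half of the configured maximum.
 */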
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006736static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6737{
6738 struct l2cap_conn *conn = chan->conn;
6739 struct l2cap_le_credits pkt;
6740 u16 return_credits;
6741
6742 /* We return more credits to the sender only after the amount of
6743 * credits falls below half of the initial amount.
6744 */
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006745 if (chan->rx_credits >= (le_max_credits + 1) / 2)
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006746 return;
6747
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006748 return_credits = le_max_credits - chan->rx_credits;
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006749
6750 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6751
6752 chan->rx_credits += return_credits;
6753
6754 pkt.cid = cpu_to_le16(chan->scid);
6755 pkt.credits = cpu_to_le16(return_credits);
6756
6757 chan->ident = l2cap_get_ident(conn);
6758
6759 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6760}
6761
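/* LE credit based flow control receive: consume one credit per PDU
 * and reassemble SDUs that span multiple PDUs, using the SDU length
 * carried in the first PDU.
 */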
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006762static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6763{
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006764 int err;
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006765
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006766 if (!chan->rx_credits) {
6767 BT_ERR("No credits to receive LE L2CAP data");
Johan Hedbergdfd97742014-01-27 15:11:34 -08006768 l2cap_send_disconn_req(chan, ECONNRESET);
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006769 return -ENOBUFS;
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006770 }
6771
6772 if (chan->imtu < skb->len) {
6773 BT_ERR("Too big LE L2CAP PDU");
6774 return -ENOBUFS;
6775 }
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006776
6777 chan->rx_credits--;
6778 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6779
6780 l2cap_chan_le_send_credits(chan);
6781
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006782 err = 0;
6783
6784 if (!chan->sdu) {
6785 u16 sdu_len;
6786
6787 sdu_len = get_unaligned_le16(skb->data);
6788 skb_pull(skb, L2CAP_SDULEN_SIZE);
6789
6790 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6791 sdu_len, skb->len, chan->imtu);
6792
6793 if (sdu_len > chan->imtu) {
6794 BT_ERR("Too big LE L2CAP SDU length received");
6795 err = -EMSGSIZE;
6796 goto failed;
6797 }
6798
6799 if (skb->len > sdu_len) {
6800 BT_ERR("Too much LE L2CAP data received");
6801 err = -EINVAL;
6802 goto failed;
6803 }
6804
6805 if (skb->len == sdu_len)
6806 return chan->ops->recv(chan, skb);
6807
6808 chan->sdu = skb;
6809 chan->sdu_len = sdu_len;
6810 chan->sdu_last_frag = skb;
6811
Luiz Augusto von Dentz203ec3c2018-09-04 13:39:22 +03006812 /* Detect if remote is not able to use the selected MPS */
6813 if (skb->len + L2CAP_SDULEN_SIZE < chan->mps) {
6814 u16 mps_len = skb->len + L2CAP_SDULEN_SIZE;
6815
6816 /* Adjust the number of credits */
6817 BT_DBG("chan->mps %u -> %u", chan->mps, mps_len);
6818 chan->mps = mps_len;
6819 l2cap_chan_le_send_credits(chan);
6820 }
6821
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006822 return 0;
6823 }
6824
6825 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6826 chan->sdu->len, skb->len, chan->sdu_len);
6827
6828 if (chan->sdu->len + skb->len > chan->sdu_len) {
6829 BT_ERR("Too much LE L2CAP data received");
6830 err = -EINVAL;
6831 goto failed;
6832 }
6833
6834 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6835 skb = NULL;
6836
6837 if (chan->sdu->len == chan->sdu_len) {
6838 err = chan->ops->recv(chan, chan->sdu);
6839 if (!err) {
6840 chan->sdu = NULL;
6841 chan->sdu_last_frag = NULL;
6842 chan->sdu_len = 0;
6843 }
6844 }
6845
6846failed:
6847 if (err) {
6848 kfree_skb(skb);
6849 kfree_skb(chan->sdu);
6850 chan->sdu = NULL;
6851 chan->sdu_last_frag = NULL;
6852 chan->sdu_len = 0;
6853 }
6854
6855 /* We can't return an error here since we took care of the skb
6856 * freeing internally. An error return would cause the caller to
6857 * do a double-free of the skb.
6858 */
6859 return 0;
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006860}
6861
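/* Deliver an incoming frame on a data channel: look the channel up by
 * CID and dispatch according to the channel mode.
 */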
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006862static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6863 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006864{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006865 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006866
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03006867 chan = l2cap_get_chan_by_scid(conn, cid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006868 if (!chan) {
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006869 if (cid == L2CAP_CID_A2MP) {
6870 chan = a2mp_channel_create(conn, skb);
6871 if (!chan) {
6872 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006873 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006874 }
6875
6876 l2cap_chan_lock(chan);
6877 } else {
6878 BT_DBG("unknown cid 0x%4.4x", cid);
6879 /* Drop packet and return */
6880 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006881 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006882 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006883 }
6884
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03006885 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006886
Johan Hedberg315917e2015-02-16 11:42:11 +02006887 /* If we receive data on a fixed channel before the info req/rsp
 6888	 * procedure is done, simply assume that the channel is supported
6889 * and mark it as ready.
6890 */
6891 if (chan->chan_type == L2CAP_CHAN_FIXED)
6892 l2cap_chan_ready(chan);
6893
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03006894 if (chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006895 goto drop;
6896
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006897 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03006898 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedbergfad5fc892013-12-05 09:45:01 +02006899 if (l2cap_le_data_rcv(chan, skb) < 0)
6900 goto drop;
6901
6902 goto done;
6903
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006904 case L2CAP_MODE_BASIC:
6905 /* If socket recv buffers overflows we drop data here
6906 * which is *bad* because L2CAP has to be reliable.
6907 * But we don't have any other choice. L2CAP doesn't
6908 * provide flow control mechanism. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006909
Szymon Janc2c96e032014-02-18 20:48:34 +01006910 if (chan->imtu < skb->len) {
6911 BT_ERR("Dropping L2CAP data: receive buffer overflow");
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006912 goto drop;
Szymon Janc2c96e032014-02-18 20:48:34 +01006913 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006914
Gustavo Padovan80b98022012-05-27 22:27:51 -03006915 if (!chan->ops->recv(chan, skb))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006916 goto done;
6917 break;
6918
6919 case L2CAP_MODE_ERTM:
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006920 case L2CAP_MODE_STREAMING:
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006921 l2cap_data_rcv(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006922 goto done;
6923
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006924 default:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006925 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006926 break;
6927 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006928
6929drop:
6930 kfree_skb(skb);
6931
6932done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02006933 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006934}
6935
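/* Deliver a connectionless data packet to a listening channel that
 * matches the given PSM.
 */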
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006936static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6937 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006938{
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006939 struct hci_conn *hcon = conn->hcon;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006940 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006941
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006942 if (hcon->type != ACL_LINK)
Johan Hedberga24cce12014-08-07 22:56:42 +03006943 goto free_skb;
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006944
Johan Hedbergbf20fd42013-05-14 13:23:13 +03006945 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6946 ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006947 if (!chan)
Johan Hedberga24cce12014-08-07 22:56:42 +03006948 goto free_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006949
Andrei Emeltchenko5b4ceda2012-02-24 16:35:32 +02006950 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006951
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03006952 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006953 goto drop;
6954
Vinicius Costa Gomese13e21d2011-06-17 22:46:27 -03006955 if (chan->imtu < skb->len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006956 goto drop;
6957
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006958 /* Store remote BD_ADDR and PSM for msg_name */
Johan Hedberga4368ff2015-03-30 23:21:01 +03006959 bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
6960 bt_cb(skb)->l2cap.psm = psm;
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006961
Johan Hedberga24cce12014-08-07 22:56:42 +03006962 if (!chan->ops->recv(chan, skb)) {
6963 l2cap_chan_put(chan);
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006964 return;
Johan Hedberga24cce12014-08-07 22:56:42 +03006965 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006966
6967drop:
Johan Hedberga24cce12014-08-07 22:56:42 +03006968 l2cap_chan_put(chan);
6969free_skb:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006970 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006971}
6972
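/* Top level dispatcher for a complete L2CAP frame: queue it while the
 * link is still being established, otherwise route it by CID to the
 * signaling, connectionless or data channel handlers.
 */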
6973static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6974{
6975 struct l2cap_hdr *lh = (void *) skb->data;
Johan Hedberg61a939c2014-01-17 20:45:11 +02006976 struct hci_conn *hcon = conn->hcon;
Al Viro8e036fc2007-07-29 00:16:36 -07006977 u16 cid, len;
6978 __le16 psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006979
Johan Hedberg61a939c2014-01-17 20:45:11 +02006980 if (hcon->state != BT_CONNECTED) {
6981 BT_DBG("queueing pending rx skb");
6982 skb_queue_tail(&conn->pending_rx, skb);
6983 return;
6984 }
6985
Linus Torvalds1da177e2005-04-16 15:20:36 -07006986 skb_pull(skb, L2CAP_HDR_SIZE);
6987 cid = __le16_to_cpu(lh->cid);
6988 len = __le16_to_cpu(lh->len);
6989
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006990 if (len != skb->len) {
6991 kfree_skb(skb);
6992 return;
6993 }
6994
Johan Hedberg9e1d7e12014-07-06 11:03:36 +03006995 /* Since we can't actively block incoming LE connections we must
6996 * at least ensure that we ignore incoming data from them.
6997 */
6998 if (hcon->type == LE_LINK &&
Johan Hedbergdcc36c12014-07-09 12:59:13 +03006999 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
Johan Hedberga250e042015-01-15 13:06:44 +02007000 bdaddr_dst_type(hcon))) {
Johan Hedberge4931502014-07-02 09:36:21 +03007001 kfree_skb(skb);
7002 return;
7003 }
7004
Linus Torvalds1da177e2005-04-16 15:20:36 -07007005 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7006
7007 switch (cid) {
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03007008 case L2CAP_CID_SIGNALING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007009 l2cap_sig_channel(conn, skb);
7010 break;
7011
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03007012 case L2CAP_CID_CONN_LESS:
Andrei Emeltchenko097db762012-03-09 14:16:17 +02007013 psm = get_unaligned((__le16 *) skb->data);
Andrei Emeltchenko0181a702012-05-29 10:04:05 +03007014 skb_pull(skb, L2CAP_PSMLEN_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007015 l2cap_conless_channel(conn, psm, skb);
7016 break;
7017
Marcel Holtmanna2877622013-10-02 23:46:54 -07007018 case L2CAP_CID_LE_SIGNALING:
7019 l2cap_le_sig_channel(conn, skb);
7020 break;
7021
Linus Torvalds1da177e2005-04-16 15:20:36 -07007022 default:
7023 l2cap_data_channel(conn, cid, skb);
7024 break;
7025 }
7026}
7027
Johan Hedberg61a939c2014-01-17 20:45:11 +02007028static void process_pending_rx(struct work_struct *work)
7029{
7030 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
7031 pending_rx_work);
7032 struct sk_buff *skb;
7033
7034 BT_DBG("");
7035
7036 while ((skb = skb_dequeue(&conn->pending_rx)))
7037 l2cap_recv_frame(conn, skb);
7038}
7039
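/* Create the L2CAP connection object for an HCI connection (or return
 * the existing one) and initialize its locks, channel list and work
 * items.
 */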
Johan Hedberg162b49e2014-01-17 20:45:10 +02007040static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
7041{
7042 struct l2cap_conn *conn = hcon->l2cap_data;
7043 struct hci_chan *hchan;
7044
7045 if (conn)
7046 return conn;
7047
7048 hchan = hci_chan_create(hcon);
7049 if (!hchan)
7050 return NULL;
7051
Johan Hedberg27f70f32014-07-21 10:50:06 +03007052 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007053 if (!conn) {
7054 hci_chan_del(hchan);
7055 return NULL;
7056 }
7057
7058 kref_init(&conn->ref);
7059 hcon->l2cap_data = conn;
Johan Hedberg51bb84572014-08-15 21:06:57 +03007060 conn->hcon = hci_conn_get(hcon);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007061 conn->hchan = hchan;
7062
7063 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
7064
7065 switch (hcon->type) {
7066 case LE_LINK:
7067 if (hcon->hdev->le_mtu) {
7068 conn->mtu = hcon->hdev->le_mtu;
7069 break;
7070 }
7071 /* fall through */
7072 default:
7073 conn->mtu = hcon->hdev->acl_mtu;
7074 break;
7075 }
7076
7077 conn->feat_mask = 0;
7078
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02007079 conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;
7080
7081 if (hcon->type == ACL_LINK &&
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07007082 hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02007083 conn->local_fixed_chan |= L2CAP_FC_A2MP;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007084
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07007085 if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
Marcel Holtmannf9be9e82014-12-06 00:35:45 +01007086 (bredr_sc_enabled(hcon->hdev) ||
Marcel Holtmannb7cb93e2015-03-13 10:20:35 -07007087 hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
Johan Hedbergb5ae3442014-08-14 12:34:26 +03007088 conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;
7089
Marcel Holtmann5a54e7c2014-07-13 20:50:15 +02007090 mutex_init(&conn->ident_lock);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007091 mutex_init(&conn->chan_lock);
7092
7093 INIT_LIST_HEAD(&conn->chan_l);
7094 INIT_LIST_HEAD(&conn->users);
7095
Johan Hedberg276d8072014-08-11 22:06:41 +03007096 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007097
Johan Hedberg61a939c2014-01-17 20:45:11 +02007098 skb_queue_head_init(&conn->pending_rx);
7099 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
Johan Hedbergf3d82d02014-09-05 22:19:50 +03007100 INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);
Johan Hedberg61a939c2014-01-17 20:45:11 +02007101
Johan Hedberg162b49e2014-01-17 20:45:10 +02007102 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
7103
7104 return conn;
7105}
7106
 7107static bool is_valid_psm(u16 psm, u8 dst_type)
{
7108 if (!psm)
7109 return false;
7110
7111 if (bdaddr_type_is_le(dst_type))
7112 return (psm <= 0x00ff);
7113
7114 /* PSM must be odd and lsb of upper byte must be 0 */
7115 return ((psm & 0x0101) == 0x0001);
7116}
7117
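/* Initiate an outgoing channel connection: set up the HCI and L2CAP
 * connections as needed, add the channel to the connection and start
 * the connect procedure.
 */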
7118int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
7119 bdaddr_t *dst, u8 dst_type)
7120{
7121 struct l2cap_conn *conn;
7122 struct hci_conn *hcon;
7123 struct hci_dev *hdev;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007124 int err;
7125
7126 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
7127 dst_type, __le16_to_cpu(psm));
7128
Johan Hedberg39385cb2016-11-12 17:03:07 +02007129 hdev = hci_get_route(dst, &chan->src, chan->src_type);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007130 if (!hdev)
7131 return -EHOSTUNREACH;
7132
7133 hci_dev_lock(hdev);
7134
Johan Hedberg162b49e2014-01-17 20:45:10 +02007135 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
7136 chan->chan_type != L2CAP_CHAN_RAW) {
7137 err = -EINVAL;
7138 goto done;
7139 }
7140
Johan Hedberg21626e62014-01-24 10:35:41 +02007141 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
7142 err = -EINVAL;
7143 goto done;
7144 }
7145
7146 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
Johan Hedberg162b49e2014-01-17 20:45:10 +02007147 err = -EINVAL;
7148 goto done;
7149 }
7150
7151 switch (chan->mode) {
7152 case L2CAP_MODE_BASIC:
7153 break;
7154 case L2CAP_MODE_LE_FLOWCTL:
7155 l2cap_le_flowctl_init(chan);
7156 break;
7157 case L2CAP_MODE_ERTM:
7158 case L2CAP_MODE_STREAMING:
7159 if (!disable_ertm)
7160 break;
7161 /* fall through */
7162 default:
Johan Hedbergbeb19e42014-07-18 11:15:26 +03007163 err = -EOPNOTSUPP;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007164 goto done;
7165 }
7166
7167 switch (chan->state) {
7168 case BT_CONNECT:
7169 case BT_CONNECT2:
7170 case BT_CONFIG:
7171 /* Already connecting */
7172 err = 0;
7173 goto done;
7174
7175 case BT_CONNECTED:
7176 /* Already connected */
7177 err = -EISCONN;
7178 goto done;
7179
7180 case BT_OPEN:
7181 case BT_BOUND:
7182 /* Can connect */
7183 break;
7184
7185 default:
7186 err = -EBADFD;
7187 goto done;
7188 }
7189
7190 /* Set destination address and psm */
7191 bacpy(&chan->dst, dst);
7192 chan->dst_type = dst_type;
7193
7194 chan->psm = psm;
7195 chan->dcid = cid;
7196
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007197 if (bdaddr_type_is_le(dst_type)) {
7198 /* Convert from L2CAP channel address type to HCI address type
7199 */
7200 if (dst_type == BDADDR_LE_PUBLIC)
7201 dst_type = ADDR_LE_DEV_PUBLIC;
7202 else
7203 dst_type = ADDR_LE_DEV_RANDOM;
7204
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07007205 if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
Johan Hedberg0ad06aa2015-11-11 14:44:57 +02007206 hcon = hci_connect_le(hdev, dst, dst_type,
7207 chan->sec_level,
7208 HCI_LE_CONN_TIMEOUT,
Szymon Jancb0a2a2b2018-04-03 13:40:06 +02007209 HCI_ROLE_SLAVE, NULL);
Johan Hedberge804d252014-07-16 11:42:28 +03007210 else
Johan Hedberg0ad06aa2015-11-11 14:44:57 +02007211 hcon = hci_connect_le_scan(hdev, dst, dst_type,
7212 chan->sec_level,
7213 HCI_LE_CONN_TIMEOUT);
Johan Hedbergcdd62752014-07-07 15:02:28 +03007214
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007215 } else {
Johan Hedbergd93375a2014-07-07 15:02:27 +03007216 u8 auth_type = l2cap_get_auth_type(chan);
Andre Guedes04a6c582014-02-26 20:21:44 -03007217 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007218 }
Johan Hedberg162b49e2014-01-17 20:45:10 +02007219
7220 if (IS_ERR(hcon)) {
7221 err = PTR_ERR(hcon);
7222 goto done;
7223 }
7224
7225 conn = l2cap_conn_add(hcon);
7226 if (!conn) {
7227 hci_conn_drop(hcon);
7228 err = -ENOMEM;
7229 goto done;
7230 }
7231
Johan Hedberg02e246a2014-10-02 10:16:22 +03007232 mutex_lock(&conn->chan_lock);
7233 l2cap_chan_lock(chan);
7234
Johan Hedberg162b49e2014-01-17 20:45:10 +02007235 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7236 hci_conn_drop(hcon);
7237 err = -EBUSY;
Johan Hedberg02e246a2014-10-02 10:16:22 +03007238 goto chan_unlock;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007239 }
7240
7241 /* Update source addr of the socket */
7242 bacpy(&chan->src, &hcon->src);
Johan Hedberga250e042015-01-15 13:06:44 +02007243 chan->src_type = bdaddr_src_type(hcon);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007244
Johan Hedberg02e246a2014-10-02 10:16:22 +03007245 __l2cap_chan_add(conn, chan);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007246
7247 /* l2cap_chan_add takes its own ref so we can drop this one */
7248 hci_conn_drop(hcon);
7249
7250 l2cap_state_change(chan, BT_CONNECT);
7251 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7252
Johan Hedberg61202e42014-01-28 15:16:48 -08007253 /* Release chan->sport so that it can be reused by other
7254 * sockets (as it's only used for listening sockets).
7255 */
7256 write_lock(&chan_list_lock);
7257 chan->sport = 0;
7258 write_unlock(&chan_list_lock);
7259
Johan Hedberg162b49e2014-01-17 20:45:10 +02007260 if (hcon->state == BT_CONNECTED) {
7261 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7262 __clear_chan_timer(chan);
Johan Hedberge7cafc42014-07-17 15:35:38 +03007263 if (l2cap_chan_check_security(chan, true))
Johan Hedberg162b49e2014-01-17 20:45:10 +02007264 l2cap_state_change(chan, BT_CONNECTED);
7265 } else
7266 l2cap_do_start(chan);
7267 }
7268
7269 err = 0;
7270
Johan Hedberg02e246a2014-10-02 10:16:22 +03007271chan_unlock:
Johan Hedberg162b49e2014-01-17 20:45:10 +02007272 l2cap_chan_unlock(chan);
Johan Hedberg02e246a2014-10-02 10:16:22 +03007273 mutex_unlock(&conn->chan_lock);
7274done:
Johan Hedberg162b49e2014-01-17 20:45:10 +02007275 hci_dev_unlock(hdev);
7276 hci_dev_put(hdev);
7277 return err;
7278}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03007279EXPORT_SYMBOL_GPL(l2cap_chan_connect);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007280
Linus Torvalds1da177e2005-04-16 15:20:36 -07007281/* ---- L2CAP interface with lower layer (HCI) ---- */
7282
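/* Called by the HCI layer for an incoming ACL connection to check
 * whether any listening channel would accept it and with which link
 * mode.
 */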
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007283int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007284{
7285 int exact = 0, lm1 = 0, lm2 = 0;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007286 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007287
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007288 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007289
7290 /* Find listening sockets and check their link_mode */
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007291 read_lock(&chan_list_lock);
7292 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03007293 if (c->state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007294 continue;
7295
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007296 if (!bacmp(&c->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007297 lm1 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007298 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007299 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007300 exact++;
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007301 } else if (!bacmp(&c->src, BDADDR_ANY)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007302 lm2 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007303 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007304 lm2 |= HCI_LM_MASTER;
7305 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007306 }
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007307 read_unlock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007308
7309 return exact ? lm1 : lm2;
7310}
7311
Johan Hedberge760ec12014-08-07 22:56:47 +03007312/* Find the next fixed channel in BT_LISTEN state, continuing iteration
 7313 * either from an existing channel in the list or from the beginning of
 7314 * the global list (by passing NULL as the first parameter).
7315 */
7316static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
Johan Hedberg327a7192015-01-15 13:06:45 +02007317 struct hci_conn *hcon)
Johan Hedberge760ec12014-08-07 22:56:47 +03007318{
Johan Hedberg327a7192015-01-15 13:06:45 +02007319 u8 src_type = bdaddr_src_type(hcon);
7320
Johan Hedberge760ec12014-08-07 22:56:47 +03007321 read_lock(&chan_list_lock);
7322
7323 if (c)
7324 c = list_next_entry(c, global_l);
7325 else
7326 c = list_entry(chan_list.next, typeof(*c), global_l);
7327
7328 list_for_each_entry_from(c, &chan_list, global_l) {
7329 if (c->chan_type != L2CAP_CHAN_FIXED)
7330 continue;
7331 if (c->state != BT_LISTEN)
7332 continue;
Johan Hedberg327a7192015-01-15 13:06:45 +02007333 if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
Johan Hedberge760ec12014-08-07 22:56:47 +03007334 continue;
Johan Hedberg327a7192015-01-15 13:06:45 +02007335 if (src_type != c->src_type)
Johan Hedberg54a1b622014-08-07 22:56:48 +03007336 continue;
Johan Hedberge760ec12014-08-07 22:56:47 +03007337
7338 l2cap_chan_hold(c);
7339 read_unlock(&chan_list_lock);
7340 return c;
7341 }
7342
7343 read_unlock(&chan_list_lock);
7344
7345 return NULL;
7346}
7347
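/* HCI connection complete callback: set up the L2CAP connection and
 * notify listening fixed channels of the new link.
 */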
Johan Hedberg539c4962015-02-18 14:53:57 +02007348static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007349{
Johan Hedberge760ec12014-08-07 22:56:47 +03007350 struct hci_dev *hdev = hcon->hdev;
Marcel Holtmann01394182006-07-03 10:02:46 +02007351 struct l2cap_conn *conn;
Johan Hedberge760ec12014-08-07 22:56:47 +03007352 struct l2cap_chan *pchan;
7353 u8 dst_type;
Marcel Holtmann01394182006-07-03 10:02:46 +02007354
Johan Hedberg539c4962015-02-18 14:53:57 +02007355 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7356 return;
7357
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007358 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007359
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007360 if (status) {
Joe Perchese1750722011-06-29 18:18:29 -07007361 l2cap_conn_del(hcon, bt_to_errno(status));
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007362 return;
Andrei Emeltchenkoba6fc312012-10-31 15:46:26 +02007363 }
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007364
7365 conn = l2cap_conn_add(hcon);
7366 if (!conn)
7367 return;
7368
Johan Hedberga250e042015-01-15 13:06:44 +02007369 dst_type = bdaddr_dst_type(hcon);
Johan Hedberge760ec12014-08-07 22:56:47 +03007370
7371 /* If device is blocked, do not create channels for it */
7372 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7373 return;
7374
7375 /* Find fixed channels and notify them of the new connection. We
7376 * use multiple individual lookups, continuing each time where
7377 * we left off, because the list lock would prevent calling the
7378 * potentially sleeping l2cap_chan_lock() function.
7379 */
Johan Hedberg327a7192015-01-15 13:06:45 +02007380 pchan = l2cap_global_fixed_chan(NULL, hcon);
Johan Hedberge760ec12014-08-07 22:56:47 +03007381 while (pchan) {
7382 struct l2cap_chan *chan, *next;
7383
7384 /* Client fixed channels should override server ones */
7385 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7386 goto next;
7387
7388 l2cap_chan_lock(pchan);
7389 chan = pchan->ops->new_connection(pchan);
7390 if (chan) {
7391 bacpy(&chan->src, &hcon->src);
7392 bacpy(&chan->dst, &hcon->dst);
Johan Hedberga250e042015-01-15 13:06:44 +02007393 chan->src_type = bdaddr_src_type(hcon);
Johan Hedberge760ec12014-08-07 22:56:47 +03007394 chan->dst_type = dst_type;
7395
7396 __l2cap_chan_add(conn, chan);
7397 }
7398
7399 l2cap_chan_unlock(pchan);
7400next:
Johan Hedberg327a7192015-01-15 13:06:45 +02007401 next = l2cap_global_fixed_chan(pchan, hcon);
Johan Hedberge760ec12014-08-07 22:56:47 +03007402 l2cap_chan_put(pchan);
7403 pchan = next;
7404 }
7405
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007406 l2cap_conn_ready(conn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007407}
7408
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007409int l2cap_disconn_ind(struct hci_conn *hcon)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007410{
7411 struct l2cap_conn *conn = hcon->l2cap_data;
7412
7413 BT_DBG("hcon %p", hcon);
7414
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007415 if (!conn)
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02007416 return HCI_ERROR_REMOTE_USER_TERM;
Marcel Holtmann2950f212009-02-12 14:02:50 +01007417 return conn->disc_reason;
7418}
7419
Johan Hedberg3a6d5762015-02-18 14:53:58 +02007420static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007421{
Johan Hedberg3a6d5762015-02-18 14:53:58 +02007422 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7423 return;
7424
Linus Torvalds1da177e2005-04-16 15:20:36 -07007425 BT_DBG("hcon %p reason %d", hcon, reason);
7426
Joe Perchese1750722011-06-29 18:18:29 -07007427 l2cap_conn_del(hcon, bt_to_errno(reason));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007428}
7429
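/* React to an encryption change on a connection oriented channel: arm
 * the disconnect timer or close the channel if encryption was lost
 * and the channel's security level requires it.
 */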
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007430static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007431{
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03007432 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007433 return;
7434
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007435 if (encrypt == 0x00) {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007436 if (chan->sec_level == BT_SECURITY_MEDIUM) {
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007437 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
Marcel Holtmann7d513e92014-01-15 22:37:40 -08007438 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7439 chan->sec_level == BT_SECURITY_FIPS)
Gustavo F. Padovan0f852722011-05-04 19:42:50 -03007440 l2cap_chan_close(chan, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007441 } else {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007442 if (chan->sec_level == BT_SECURITY_MEDIUM)
Gustavo F. Padovanc9b66672011-05-17 14:59:01 -03007443 __clear_chan_timer(chan);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007444 }
7445}
7446
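/* HCI security (authentication/encryption) complete callback: walk
 * all channels on the connection and resume them, continue connection
 * setup or tear them down depending on the result.
 */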
Johan Hedberg354fe802015-02-18 14:53:56 +02007447static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007448{
Marcel Holtmann40be4922008-07-14 20:13:50 +02007449 struct l2cap_conn *conn = hcon->l2cap_data;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03007450 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007451
Marcel Holtmann01394182006-07-03 10:02:46 +02007452 if (!conn)
Johan Hedberg354fe802015-02-18 14:53:56 +02007453 return;
Marcel Holtmann01394182006-07-03 10:02:46 +02007454
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007455 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007456
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007457 mutex_lock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007458
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007459 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007460 l2cap_chan_lock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007461
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007462 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7463 state_to_string(chan->state));
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007464
Johan Hedberg2338a7e2014-01-24 10:35:40 +02007465 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko78eb2f92012-07-19 17:03:47 +03007466 l2cap_chan_unlock(chan);
7467 continue;
7468 }
7469
Johan Hedberg191eb392014-08-07 22:56:45 +03007470 if (!status && encrypt)
7471 chan->sec_level = hcon->sec_level;
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007472
Andrei Emeltchenko96eff462012-11-15 18:14:53 +02007473 if (!__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007474 l2cap_chan_unlock(chan);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01007475 continue;
7476 }
7477
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03007478 if (!status && (chan->state == BT_CONNECTED ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01007479 chan->state == BT_CONFIG)) {
Marcel Holtmannd97c8992013-10-14 02:53:54 -07007480 chan->ops->resume(chan);
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007481 l2cap_check_encryption(chan, encrypt);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007482 l2cap_chan_unlock(chan);
Marcel Holtmann9719f8a2008-07-14 20:13:45 +02007483 continue;
7484 }
7485
Gustavo F. Padovan89bc5002011-06-03 00:19:47 -03007486 if (chan->state == BT_CONNECT) {
Marcel Holtmann68d1e282019-06-22 15:47:01 +02007487 if (!status && l2cap_check_enc_key_size(hcon))
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03007488 l2cap_start_connection(chan);
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007489 else
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007490 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergfa37c1a2014-11-13 10:55:17 +02007491 } else if (chan->state == BT_CONNECT2 &&
7492 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007493 struct l2cap_conn_rsp rsp;
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007494 __u16 res, stat;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007495
Marcel Holtmann68d1e282019-06-22 15:47:01 +02007496 if (!status && l2cap_check_enc_key_size(hcon)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07007497 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007498 res = L2CAP_CR_PEND;
7499 stat = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08007500 chan->ops->defer(chan);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007501 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007502 l2cap_state_change(chan, BT_CONFIG);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007503 res = L2CAP_CR_SUCCESS;
7504 stat = L2CAP_CS_NO_INFO;
7505 }
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007506 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007507 l2cap_state_change(chan, BT_DISCONN);
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007508 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007509 res = L2CAP_CR_SEC_BLOCK;
7510 stat = L2CAP_CS_NO_INFO;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007511 }
7512
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03007513 rsp.scid = cpu_to_le16(chan->dcid);
7514 rsp.dcid = cpu_to_le16(chan->scid);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007515 rsp.result = cpu_to_le16(res);
7516 rsp.status = cpu_to_le16(stat);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03007517 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01007518 sizeof(rsp), &rsp);
Mat Martineau2d369352012-05-23 14:59:30 -07007519
7520 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7521 res == L2CAP_CR_SUCCESS) {
7522 char buf[128];
7523 set_bit(CONF_REQ_SENT, &chan->conf_state);
7524 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7525 L2CAP_CONF_REQ,
Ben Seri6300c8b2017-09-09 23:15:59 +02007526 l2cap_build_conf_req(chan, buf, sizeof(buf)),
Mat Martineau2d369352012-05-23 14:59:30 -07007527 buf);
7528 chan->num_conf_req++;
7529 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007530 }
7531
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007532 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007533 }
7534
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007535 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007536}
7537
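/* Receive ACL data from the HCI layer, reassembling L2CAP frames that
 * are fragmented across multiple ACL packets before handing them to
 * l2cap_recv_frame().
 */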
Arron Wang9b4c3332015-06-09 17:47:22 +08007538void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007539{
7540 struct l2cap_conn *conn = hcon->l2cap_data;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007541 struct l2cap_hdr *hdr;
7542 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007543
Andrei Emeltchenko1d13a252012-10-15 11:58:41 +03007544	/* For an AMP controller, do not create an l2cap conn */
Marcel Holtmannca8bee52016-07-05 14:30:14 +02007545 if (!conn && hcon->hdev->dev_type != HCI_PRIMARY)
Andrei Emeltchenko1d13a252012-10-15 11:58:41 +03007546 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007547
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007548 if (!conn)
Claudio Takahasibaf43252013-04-11 13:55:50 -03007549 conn = l2cap_conn_add(hcon);
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007550
7551 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007552 goto drop;
7553
7554 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7555
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007556 switch (flags) {
7557 case ACL_START:
7558 case ACL_START_NO_FLUSH:
7559 case ACL_COMPLETE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007560 if (conn->rx_len) {
7561 BT_ERR("Unexpected start frame (len %d)", skb->len);
7562 kfree_skb(conn->rx_skb);
7563 conn->rx_skb = NULL;
7564 conn->rx_len = 0;
7565 l2cap_conn_unreliable(conn, ECOMM);
7566 }
7567
Andrei Emeltchenkoaae7fe22010-09-15 14:28:43 +03007568 /* Start fragment always begin with Basic L2CAP header */
7569 if (skb->len < L2CAP_HDR_SIZE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007570 BT_ERR("Frame is too short (len %d)", skb->len);
7571 l2cap_conn_unreliable(conn, ECOMM);
7572 goto drop;
7573 }
7574
7575 hdr = (struct l2cap_hdr *) skb->data;
7576 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7577
7578 if (len == skb->len) {
7579 /* Complete frame received */
7580 l2cap_recv_frame(conn, skb);
Arron Wang9b4c3332015-06-09 17:47:22 +08007581 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007582 }
7583
7584 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7585
7586 if (skb->len > len) {
7587 BT_ERR("Frame is too long (len %d, expected len %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007588 skb->len, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007589 l2cap_conn_unreliable(conn, ECOMM);
7590 goto drop;
7591 }
7592
7593 /* Allocate skb for the complete frame (with header) */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03007594 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03007595 if (!conn->rx_skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007596 goto drop;
7597
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007598 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007599 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007600 conn->rx_len = len - skb->len;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007601 break;
7602
7603 case ACL_CONT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007604 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7605
7606 if (!conn->rx_len) {
7607 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7608 l2cap_conn_unreliable(conn, ECOMM);
7609 goto drop;
7610 }
7611
7612 if (skb->len > conn->rx_len) {
7613 BT_ERR("Fragment is too long (len %d, expected %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007614 skb->len, conn->rx_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007615 kfree_skb(conn->rx_skb);
7616 conn->rx_skb = NULL;
7617 conn->rx_len = 0;
7618 l2cap_conn_unreliable(conn, ECOMM);
7619 goto drop;
7620 }
7621
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007622 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007623 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007624 conn->rx_len -= skb->len;
7625
7626 if (!conn->rx_len) {
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007627 /* Complete frame received. l2cap_recv_frame
7628 * takes ownership of the skb so set the global
7629 * rx_skb pointer to NULL first.
7630 */
7631 struct sk_buff *rx_skb = conn->rx_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007632 conn->rx_skb = NULL;
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007633 l2cap_recv_frame(conn, rx_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007634 }
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007635 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007636 }
7637
7638drop:
7639 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007640}
7641
Johan Hedberg354fe802015-02-18 14:53:56 +02007642static struct hci_cb l2cap_cb = {
7643 .name = "L2CAP",
Johan Hedberg539c4962015-02-18 14:53:57 +02007644 .connect_cfm = l2cap_connect_cfm,
Johan Hedberg3a6d5762015-02-18 14:53:58 +02007645 .disconn_cfm = l2cap_disconn_cfm,
Johan Hedberg354fe802015-02-18 14:53:56 +02007646 .security_cfm = l2cap_security_cfm,
7647};
7648
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007649static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007650{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007651 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007652
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007653 read_lock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007654
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007655 list_for_each_entry(c, &chan_list, global_l) {
Marcel Holtmanneeb5a062015-01-14 13:44:21 -08007656 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7657 &c->src, c->src_type, &c->dst, c->dst_type,
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007658 c->state, __le16_to_cpu(c->psm),
7659 c->scid, c->dcid, c->imtu, c->omtu,
7660 c->sec_level, c->mode);
Andrei Emeltchenko61e1b4b2012-01-19 11:19:50 +02007661 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007662
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007663 read_unlock(&chan_list_lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007664
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007665 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007666}
7667
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007668static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7669{
7670 return single_open(file, l2cap_debugfs_show, inode->i_private);
7671}
7672
7673static const struct file_operations l2cap_debugfs_fops = {
7674 .open = l2cap_debugfs_open,
7675 .read = seq_read,
7676 .llseek = seq_lseek,
7677 .release = single_release,
7678};
7679
7680static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007681
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007682int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007683{
7684 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007685
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007686 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007687 if (err < 0)
7688 return err;
7689
Johan Hedberg354fe802015-02-18 14:53:56 +02007690 hci_register_cb(&l2cap_cb);
7691
Marcel Holtmann1120e4b2013-10-17 17:24:16 -07007692 if (IS_ERR_OR_NULL(bt_debugfs))
7693 return 0;
7694
7695 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7696 NULL, &l2cap_debugfs_fops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007697
Samuel Ortiz40b93972014-05-14 17:53:35 +02007698 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007699 &le_max_credits);
Samuel Ortiz40b93972014-05-14 17:53:35 +02007700 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007701 &le_default_mps);
7702
Linus Torvalds1da177e2005-04-16 15:20:36 -07007703 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007704}
7705
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007706void l2cap_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007707{
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007708 debugfs_remove(l2cap_debugfs);
Johan Hedberg354fe802015-02-18 14:53:56 +02007709 hci_unregister_cb(&l2cap_cb);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007710 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007711}
7712
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03007713module_param(disable_ertm, bool, 0644);
7714MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");