blob: fdb7989e29976742a6617b7c54b87c7562aa5a06 [file] [log] [blame]
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
28
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020029/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070030
Linus Torvalds1da177e2005-04-16 15:20:36 -070031#include <linux/module.h>
32
Marcel Holtmannaef7d972010-03-21 05:27:45 +010033#include <linux/debugfs.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030034#include <linux/crc16.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035
36#include <net/bluetooth/bluetooth.h>
37#include <net/bluetooth/hci_core.h>
38#include <net/bluetooth/l2cap.h>
Marcel Holtmann7ef9fbf2013-10-10 14:54:14 -070039
Marcel Holtmannac4b7232013-10-10 14:54:16 -070040#include "smp.h"
Marcel Holtmann70247282013-10-10 14:54:15 -070041#include "a2mp.h"
Marcel Holtmann7ef9fbf2013-10-10 14:54:14 -070042#include "amp.h"
Linus Torvalds1da177e2005-04-16 15:20:36 -070043
Johan Hedberg0f1bfe42014-01-27 15:11:35 -080044#define LE_FLOWCTL_MAX_CREDITS 65535
45
Mat Martineaud1de6d42012-05-17 20:53:55 -070046bool disable_ertm;
Marcel Holtmannf0709e02007-10-20 13:38:51 +020047
Marcel Holtmann547d1032013-10-12 08:18:19 -070048static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
Linus Torvalds1da177e2005-04-16 15:20:36 -070049
Johannes Bergb5ad8b72011-06-01 08:54:45 +020050static LIST_HEAD(chan_list);
51static DEFINE_RWLOCK(chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -070052
Johan Hedbergf15b8ec2013-12-03 15:08:25 +020053static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
54static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;
55
Linus Torvalds1da177e2005-04-16 15:20:36 -070056static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +010057 u8 code, u8 ident, u16 dlen, void *data);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -030058static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
Gustavo Padovan2d792812012-10-06 10:07:01 +010059 void *data);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -030060static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +020061static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);
Linus Torvalds1da177e2005-04-16 15:20:36 -070062
Gustavo Padovand6603662012-05-21 13:58:22 -030063static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
Gustavo Padovan2d792812012-10-06 10:07:01 +010064 struct sk_buff_head *skbs, u8 event);
Mat Martineau608bcc62012-05-17 20:53:32 -070065
Johan Hedberga250e042015-01-15 13:06:44 +020066static inline u8 bdaddr_type(u8 link_type, u8 bdaddr_type)
Marcel Holtmann4f1654e2013-10-13 08:50:41 -070067{
Johan Hedberga250e042015-01-15 13:06:44 +020068 if (link_type == LE_LINK) {
69 if (bdaddr_type == ADDR_LE_DEV_PUBLIC)
Marcel Holtmann4f1654e2013-10-13 08:50:41 -070070 return BDADDR_LE_PUBLIC;
71 else
72 return BDADDR_LE_RANDOM;
73 }
74
75 return BDADDR_BREDR;
76}
77
/* Resolve the BDADDR_* type of the local (source) address of @hcon. */
static inline u8 bdaddr_src_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->src_type);
}
82
/* Resolve the BDADDR_* type of the remote (destination) address of @hcon. */
static inline u8 bdaddr_dst_type(struct hci_conn *hcon)
{
	return bdaddr_type(hcon->type, hcon->dst_type);
}
87
Marcel Holtmann01394182006-07-03 10:02:46 +020088/* ---- L2CAP channels ---- */
Gustavo F. Padovan71ba0e52011-05-17 14:34:52 -030089
Gustavo Padovan2d792812012-10-06 10:07:01 +010090static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
91 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +020092{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +020093 struct l2cap_chan *c;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -030094
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +020095 list_for_each_entry(c, &conn->chan_l, list) {
96 if (c->dcid == cid)
97 return c;
Marcel Holtmann01394182006-07-03 10:02:46 +020098 }
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +020099 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200100}
101
Gustavo Padovan2d792812012-10-06 10:07:01 +0100102static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
103 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200104{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200105 struct l2cap_chan *c;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300106
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200107 list_for_each_entry(c, &conn->chan_l, list) {
108 if (c->scid == cid)
109 return c;
Marcel Holtmann01394182006-07-03 10:02:46 +0200110 }
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200111 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200112}
113
114/* Find channel with given SCID.
Mat Martineauef191ad2012-05-02 09:42:00 -0700115 * Returns locked channel. */
Gustavo Padovan2d792812012-10-06 10:07:01 +0100116static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
117 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200118{
Gustavo F. Padovan48454072011-03-25 00:22:30 -0300119 struct l2cap_chan *c;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300120
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200121 mutex_lock(&conn->chan_lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300122 c = __l2cap_get_chan_by_scid(conn, cid);
Mat Martineauef191ad2012-05-02 09:42:00 -0700123 if (c)
124 l2cap_chan_lock(c);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200125 mutex_unlock(&conn->chan_lock);
126
Gustavo F. Padovan48454072011-03-25 00:22:30 -0300127 return c;
Marcel Holtmann01394182006-07-03 10:02:46 +0200128}
129
Mat Martineaub1a130b2012-10-23 15:24:09 -0700130/* Find channel with given DCID.
131 * Returns locked channel.
132 */
133static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
134 u16 cid)
135{
136 struct l2cap_chan *c;
137
138 mutex_lock(&conn->chan_lock);
139 c = __l2cap_get_chan_by_dcid(conn, cid);
140 if (c)
141 l2cap_chan_lock(c);
142 mutex_unlock(&conn->chan_lock);
143
144 return c;
145}
146
Gustavo Padovan2d792812012-10-06 10:07:01 +0100147static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
148 u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200149{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200150 struct l2cap_chan *c;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300151
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200152 list_for_each_entry(c, &conn->chan_l, list) {
153 if (c->ident == ident)
154 return c;
Marcel Holtmann01394182006-07-03 10:02:46 +0200155 }
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +0200156 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200157}
158
Mat Martineau5b155ef2012-10-23 15:24:14 -0700159static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
160 u8 ident)
161{
162 struct l2cap_chan *c;
163
164 mutex_lock(&conn->chan_lock);
165 c = __l2cap_get_chan_by_ident(conn, ident);
166 if (c)
167 l2cap_chan_lock(c);
168 mutex_unlock(&conn->chan_lock);
169
170 return c;
171}
172
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300173static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300174{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300175 struct l2cap_chan *c;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300176
Gustavo F. Padovan23691d72011-04-27 18:26:32 -0300177 list_for_each_entry(c, &chan_list, global_l) {
Marcel Holtmann7eafc592013-10-13 08:12:47 -0700178 if (c->sport == psm && !bacmp(&c->src, src))
Szymon Janc250938c2011-11-16 09:32:22 +0100179 return c;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300180 }
Szymon Janc250938c2011-11-16 09:32:22 +0100181 return NULL;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300182}
183
/* Bind @chan to PSM @psm for source address @src.
 *
 * If @psm is non-zero it is claimed as-is (fails with -EADDRINUSE if a
 * channel already has that (psm, src) binding).  If @psm is zero, a free
 * dynamic PSM in 0x1001..0x10ff is allocated; BR/EDR-valid PSMs are odd,
 * hence the step of 2.  Returns 0 on success, -EINVAL when the dynamic
 * range is exhausted.  Serialized against the global channel list via
 * chan_list_lock.
 */
int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
	int err;

	write_lock(&chan_list_lock);

	if (psm && __l2cap_global_chan_by_addr(psm, src)) {
		err = -EADDRINUSE;
		goto done;
	}

	if (psm) {
		chan->psm = psm;
		chan->sport = psm;
		err = 0;
	} else {
		u16 p;

		err = -EINVAL;
		for (p = 0x1001; p < 0x1100; p += 2)
			if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
				chan->psm = cpu_to_le16(p);
				chan->sport = cpu_to_le16(p);
				err = 0;
				break;
			}
	}

done:
	write_unlock(&chan_list_lock);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300217
/* Bind @chan to the fixed CID @scid, turning it into a fixed channel.
 * Always returns 0.
 */
int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
	write_lock(&chan_list_lock);

	/* Override the defaults (which are for conn-oriented) */
	chan->omtu = L2CAP_DEFAULT_MTU;
	chan->chan_type = L2CAP_CHAN_FIXED;

	chan->scid = scid;

	write_unlock(&chan_list_lock);

	return 0;
}
232
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300233static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
Marcel Holtmann01394182006-07-03 10:02:46 +0200234{
Johan Hedberge77af752013-10-08 10:31:00 +0200235 u16 cid, dyn_end;
Marcel Holtmann01394182006-07-03 10:02:46 +0200236
Johan Hedberge77af752013-10-08 10:31:00 +0200237 if (conn->hcon->type == LE_LINK)
238 dyn_end = L2CAP_CID_LE_DYN_END;
239 else
240 dyn_end = L2CAP_CID_DYN_END;
241
Johan Hedbergab0c1272015-11-02 14:39:16 +0200242 for (cid = L2CAP_CID_DYN_START; cid <= dyn_end; cid++) {
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300243 if (!__l2cap_get_chan_by_scid(conn, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200244 return cid;
245 }
246
247 return 0;
248}
249
/* Move @chan to @state and notify its owner through the state_change op
 * (with err == 0, i.e. a non-error transition).
 */
static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
	BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
	       state_to_string(state));

	chan->state = state;
	chan->ops->state_change(chan, state, 0);
}
258
/* Move @chan to @state and report @err to its owner in one notification. */
static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
						int state, int err)
{
	chan->state = state;
	chan->ops->state_change(chan, chan->state, err);
}
265
/* Report @err to the channel owner without changing the channel state. */
static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
	chan->ops->state_change(chan, chan->state, err);
}
270
/* Arm the ERTM retransmission timer, unless the monitor timer is already
 * pending or no retransmission timeout is configured.
 */
static void __set_retrans_timer(struct l2cap_chan *chan)
{
	if (!delayed_work_pending(&chan->monitor_timer) &&
	    chan->retrans_timeout) {
		l2cap_set_timer(chan, &chan->retrans_timer,
				msecs_to_jiffies(chan->retrans_timeout));
	}
}
279
/* Arm the ERTM monitor timer (if configured); the retransmission timer
 * is cancelled first since the two are mutually exclusive.
 */
static void __set_monitor_timer(struct l2cap_chan *chan)
{
	__clear_retrans_timer(chan);
	if (chan->monitor_timeout) {
		l2cap_set_timer(chan, &chan->monitor_timer,
				msecs_to_jiffies(chan->monitor_timeout));
	}
}
288
/* Find the skb in @head carrying ERTM tx sequence number @seq, or NULL.
 * Linear scan of the queue.
 */
static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
					       u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->l2cap.txseq == seq)
			return skb;
	}

	return NULL;
}
301
/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */
312
313static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
314{
315 size_t alloc_size, i;
316
317 /* Allocated size is a power of 2 to map sequence numbers
318 * (which may be up to 14 bits) in to a smaller array that is
319 * sized for the negotiated ERTM transmit windows.
320 */
321 alloc_size = roundup_pow_of_two(size);
322
323 seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
324 if (!seq_list->list)
325 return -ENOMEM;
326
327 seq_list->mask = alloc_size - 1;
328 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
329 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
330 for (i = 0; i < alloc_size; i++)
331 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
332
333 return 0;
334}
335
/* Release the backing array of @seq_list (kfree(NULL) is a no-op). */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
340
/* Return true if @seq is currently a member of @seq_list. */
static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
					   u16 seq)
{
	/* Constant-time check for list membership */
	return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}
347
/* Remove and return the sequence number at the head of @seq_list.
 *
 * The head slot is cleared and the head advanced to its successor; when
 * the successor is the L2CAP_SEQ_LIST_TAIL sentinel the list has become
 * empty and head/tail are reset to CLEAR.
 *
 * NOTE(review): no empty-list guard here — callers are presumably
 * expected to check head != L2CAP_SEQ_LIST_CLEAR first; confirm at the
 * call sites before relying on this.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	u16 seq = seq_list->head;
	u16 mask = seq_list->mask;

	seq_list->head = seq_list->list[seq & mask];
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

	if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
		seq_list->head = L2CAP_SEQ_LIST_CLEAR;
		seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
	}

	return seq;
}
363
/* Empty @seq_list: clear every slot and reset head/tail.
 * Fast-path return when the list is already empty.
 */
static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
	u16 i;

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
		return;

	for (i = 0; i <= seq_list->mask; i++)
		seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

	seq_list->head = L2CAP_SEQ_LIST_CLEAR;
	seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}
377
/* Append @seq to the tail of @seq_list in constant time.
 * Duplicate appends are silently ignored (slot already non-CLEAR).
 * The new tail slot holds the L2CAP_SEQ_LIST_TAIL sentinel; the previous
 * tail slot is rewritten to chain to @seq.
 */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	/* All appends happen in constant time */

	if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
		return;

	if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
		seq_list->head = seq;
	else
		seq_list->list[seq_list->tail & mask] = seq;

	seq_list->tail = seq;
	seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}
395
/* Delayed-work handler for chan->chan_timer (armed via l2cap_set_timer,
 * registered in l2cap_chan_create).
 *
 * Picks an errno based on the channel state — a timeout while connected,
 * configuring, or mid-connect (past SDP security) is reported as
 * ECONNREFUSED, otherwise ETIMEDOUT — then closes the channel.
 *
 * Lock order: conn->chan_lock, then the channel lock.  The owner's
 * close() callback runs after the channel lock is dropped but still
 * under conn->chan_lock.  The final l2cap_chan_put() drops the reference
 * taken when the timer was armed.
 */
static void l2cap_chan_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       chan_timer.work);
	struct l2cap_conn *conn = chan->conn;
	int reason;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (chan->state == BT_CONNECT &&
		 chan->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	l2cap_chan_close(chan, reason);

	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	mutex_unlock(&conn->chan_lock);

	l2cap_chan_put(chan);
}
425
/* Allocate and initialize a new L2CAP channel with one reference held.
 *
 * The channel is linked onto the global chan_list, its timeout work is
 * set up, and it starts in BT_OPEN.  Returns NULL on allocation failure.
 * GFP_ATOMIC is used, so this is callable from non-sleeping context.
 */
struct l2cap_chan *l2cap_chan_create(void)
{
	struct l2cap_chan *chan;

	chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
	if (!chan)
		return NULL;

	mutex_init(&chan->lock);

	/* Set default lock nesting level */
	atomic_set(&chan->nesting, L2CAP_NESTING_NORMAL);

	write_lock(&chan_list_lock);
	list_add(&chan->global_l, &chan_list);
	write_unlock(&chan_list_lock);

	INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

	chan->state = BT_OPEN;

	kref_init(&chan->kref);

	/* This flag is cleared in l2cap_chan_ready() */
	set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

	BT_DBG("chan %p", chan);

	return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);
Marcel Holtmann01394182006-07-03 10:02:46 +0200457
/* kref release callback: unlink the channel from the global list and
 * free it.  Invoked by l2cap_chan_put() when the last reference drops.
 */
static void l2cap_chan_destroy(struct kref *kref)
{
	struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

	BT_DBG("chan %p", chan);

	write_lock(&chan_list_lock);
	list_del(&chan->global_l);
	write_unlock(&chan_list_lock);

	kfree(chan);
}
470
/* Take an additional reference on channel @c. */
void l2cap_chan_hold(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_get(&c->kref);
}
477
/* Drop a reference on channel @c; the last put frees it via
 * l2cap_chan_destroy().
 */
void l2cap_chan_put(struct l2cap_chan *c)
{
	BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

	kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);
Jaganath Kanakkassery30648372012-07-13 18:17:54 +0530485
/* Reset @chan to spec-default ERTM/flow parameters, low security and a
 * cleared configuration state, and mark it force-active.  The remote
 * parameters start mirroring the local defaults until (re)negotiated.
 */
void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
	chan->fcs = L2CAP_FCS_CRC16;
	chan->max_tx = L2CAP_DEFAULT_MAX_TX;
	chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
	chan->remote_max_tx = chan->max_tx;
	chan->remote_tx_win = chan->tx_win;
	chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
	chan->sec_level = BT_SECURITY_LOW;
	chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
	chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
	chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
	chan->conf_state = 0;

	set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);
Andrei Emeltchenkobd4b1652012-03-28 16:31:25 +0300504
/* Initialize LE credit-based flow control state on @chan: no SDU in
 * reassembly, zero TX credits (peer grants them), module-default RX
 * credits, and an MPS capped by the channel's incoming MTU.
 */
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;
	chan->tx_credits = 0;
	chan->rx_credits = le_max_credits;
	chan->mps = min_t(u16, chan->imtu, le_default_mps);

	skb_queue_head_init(&chan->tx_q);
}
516
/* Attach @chan to @conn and add it to the connection's channel list.
 *
 * CIDs and the outgoing MTU are set according to the channel type:
 * connection-oriented channels get a freshly allocated dynamic SCID,
 * connectionless and raw channels use the well-known CIDs, and fixed
 * channels are left for the caller to configure.  EFS-related local
 * parameters are reset to best-effort defaults.
 *
 * Takes a channel reference, and holds the underlying hci_conn except
 * for fixed channels that did not set FLAG_HOLD_HCI_CONN.
 * Caller holds conn->chan_lock (see l2cap_chan_add()).
 */
void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
	       __le16_to_cpu(chan->psm), chan->dcid);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	chan->conn = conn;

	switch (chan->chan_type) {
	case L2CAP_CHAN_CONN_ORIENTED:
		/* Alloc CID for connection-oriented socket */
		chan->scid = l2cap_alloc_cid(conn);
		if (conn->hcon->type == ACL_LINK)
			chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_CONN_LESS:
		/* Connectionless socket */
		chan->scid = L2CAP_CID_CONN_LESS;
		chan->dcid = L2CAP_CID_CONN_LESS;
		chan->omtu = L2CAP_DEFAULT_MTU;
		break;

	case L2CAP_CHAN_FIXED:
		/* Caller will set CID and CID specific MTU values */
		break;

	default:
		/* Raw socket can send/recv signalling messages only */
		chan->scid = L2CAP_CID_SIGNALING;
		chan->dcid = L2CAP_CID_SIGNALING;
		chan->omtu = L2CAP_DEFAULT_MTU;
	}

	chan->local_id = L2CAP_BESTEFFORT_ID;
	chan->local_stype = L2CAP_SERV_BESTEFFORT;
	chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
	chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
	chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
	chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

	l2cap_chan_hold(chan);

	/* Only keep a reference for fixed channels if they requested it */
	if (chan->chan_type != L2CAP_CHAN_FIXED ||
	    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
		hci_conn_hold(conn->hcon);

	list_add(&chan->list, &conn->chan_l);
}
568
/* Locked wrapper around __l2cap_chan_add(): attach @chan to @conn under
 * conn->chan_lock.
 */
void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
	mutex_lock(&conn->chan_lock);
	__l2cap_chan_add(conn, chan);
	mutex_unlock(&conn->chan_lock);
}
575
/* Detach @chan from its connection and tear down per-mode state.
 *
 * Stops the channel timer, notifies the owner via teardown(@err), and,
 * if still attached to a connection: unlinks it from conn->chan_l, drops
 * the reference taken in __l2cap_chan_add(), releases the hci_conn hold
 * (unless a fixed channel without FLAG_HOLD_HCI_CONN — see the matching
 * logic in __l2cap_chan_add), and clears any AMP manager back-pointer.
 * An active AMP logical link, if present, is disconnected.
 *
 * If configuration never completed (CONF_NOT_COMPLETE) there is no
 * per-mode state to free and we return early.  Otherwise: LE flow
 * control purges the TX queue; ERTM additionally cancels its timers,
 * purges the SREJ queue and frees both sequence lists, then falls
 * through to the streaming case, which purges the TX queue.
 *
 * Caller is presumably expected to hold the channel (and conn) locks —
 * confirm at call sites; locking is not taken here.
 */
void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;

	__clear_chan_timer(chan);

	BT_DBG("chan %p, conn %p, err %d, state %s", chan, conn, err,
	       state_to_string(chan->state));

	chan->ops->teardown(chan, err);

	if (conn) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		/* Delete from channel list */
		list_del(&chan->list);

		l2cap_chan_put(chan);

		chan->conn = NULL;

		/* Reference was only held for non-fixed channels or
		 * fixed channels that explicitly requested it using the
		 * FLAG_HOLD_HCI_CONN flag.
		 */
		if (chan->chan_type != L2CAP_CHAN_FIXED ||
		    test_bit(FLAG_HOLD_HCI_CONN, &chan->flags))
			hci_conn_drop(conn->hcon);

		if (mgr && mgr->bredr_chan == chan)
			mgr->bredr_chan = NULL;
	}

	if (chan->hs_hchan) {
		struct hci_chan *hs_hchan = chan->hs_hchan;

		BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
		amp_disconnect_logical_link(hs_hchan);
	}

	if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
		return;

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;

	case L2CAP_MODE_LE_FLOWCTL:
		skb_queue_purge(&chan->tx_q);
		break;

	case L2CAP_MODE_ERTM:
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);

		skb_queue_purge(&chan->srej_q);

		l2cap_seq_list_free(&chan->srej_list);
		l2cap_seq_list_free(&chan->retrans_list);

		/* fall through */

	case L2CAP_MODE_STREAMING:
		skb_queue_purge(&chan->tx_q);
		break;
	}

	return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);
Marcel Holtmann01394182006-07-03 10:02:46 +0200646
/* Worker for conn->id_addr_update_work: propagate an updated identity
 * address from the underlying HCI connection to every channel on this
 * L2CAP connection.  Runs in workqueue context so the chan_lock mutex
 * and the per-channel locks can be taken safely.
 */
static void l2cap_conn_update_id_addr(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       id_addr_update_work);
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		l2cap_chan_lock(chan);
		/* Copy the current destination address and its type from
		 * the HCI connection into the channel.
		 */
		bacpy(&chan->dst, &hcon->dst);
		chan->dst_type = bdaddr_dst_type(hcon);
		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
665
/* Reject an incoming LE connection request on @chan.  The result is
 * AUTHORIZATION when setup was deferred (accept still pending at the
 * socket layer), otherwise BAD_PSM.  Moves the channel to BT_DISCONN
 * and sends the LE connect response using the saved request ident.
 */
static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_AUTHORIZATION;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* Echo our channel parameters back alongside the result code */
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.mtu = cpu_to_le16(chan->imtu);
	rsp.mps = cpu_to_le16(chan->mps);
	rsp.credits = cpu_to_le16(chan->rx_credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
		       &rsp);
}
688
/* Reject an incoming BR/EDR connection request on @chan.  SEC_BLOCK is
 * reported when setup was deferred (security/accept pending), otherwise
 * BAD_PSM.  Moves the channel to BT_DISCONN and answers with the saved
 * request ident.
 */
static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_rsp rsp;
	u16 result;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
		result = L2CAP_CR_SEC_BLOCK;
	else
		result = L2CAP_CR_BAD_PSM;

	l2cap_state_change(chan, BT_DISCONN);

	/* Note: scid/dcid are from the remote's point of view */
	rsp.scid = cpu_to_le16(chan->dcid);
	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}
709
/* Close @chan according to its current state:
 * - BT_LISTEN / unknown states: tear the channel down locally.
 * - BT_CONNECTED/BT_CONFIG: connection-oriented channels send a
 *   disconnect request and arm the channel timer; others are deleted.
 * - BT_CONNECT2: a pending incoming request is rejected first (ACL or
 *   LE variant depending on link type), then the channel is deleted.
 * - BT_CONNECT/BT_DISCONN: delete the channel with @reason.
 * Caller must hold the channel lock.
 */
void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
	struct l2cap_conn *conn = chan->conn;

	BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

	switch (chan->state) {
	case BT_LISTEN:
		chan->ops->teardown(chan, 0);
		break;

	case BT_CONNECTED:
	case BT_CONFIG:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
			l2cap_send_disconn_req(chan, reason);
		} else
			l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT2:
		if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
			if (conn->hcon->type == ACL_LINK)
				l2cap_chan_connect_reject(chan);
			else if (conn->hcon->type == LE_LINK)
				l2cap_chan_le_connect_reject(chan);
		}

		l2cap_chan_del(chan, reason);
		break;

	case BT_CONNECT:
	case BT_DISCONN:
		l2cap_chan_del(chan, reason);
		break;

	default:
		chan->ops->teardown(chan, 0);
		break;
	}
}
EXPORT_SYMBOL(l2cap_chan_close);
Gustavo F. Padovan4519de92011-04-28 17:55:53 -0300752
/* Map the channel type and requested security level to an HCI
 * authentication requirement.  As a side effect, SDP-related PSMs
 * (SDP, 3DSP) have their security level raised from LOW to SDP so
 * they never trigger authentication.
 */
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
	switch (chan->chan_type) {
	case L2CAP_CHAN_RAW:
		/* Raw (e.g. dedicated bonding) channels */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_DEDICATED_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_DEDICATED_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	case L2CAP_CHAN_CONN_LESS:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;
		}
		if (chan->sec_level == BT_SECURITY_HIGH ||
		    chan->sec_level == BT_SECURITY_FIPS)
			return HCI_AT_NO_BONDING_MITM;
		else
			return HCI_AT_NO_BONDING;
		break;
	case L2CAP_CHAN_CONN_ORIENTED:
		if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
			if (chan->sec_level == BT_SECURITY_LOW)
				chan->sec_level = BT_SECURITY_SDP;

			if (chan->sec_level == BT_SECURITY_HIGH ||
			    chan->sec_level == BT_SECURITY_FIPS)
				return HCI_AT_NO_BONDING_MITM;
			else
				return HCI_AT_NO_BONDING;
		}
		/* fall through */
	default:
		/* General bonding for everything else */
		switch (chan->sec_level) {
		case BT_SECURITY_HIGH:
		case BT_SECURITY_FIPS:
			return HCI_AT_GENERAL_BONDING_MITM;
		case BT_SECURITY_MEDIUM:
			return HCI_AT_GENERAL_BONDING;
		default:
			return HCI_AT_NO_BONDING;
		}
		break;
	}
}
803
/* Service level security */
/* Check (and if needed elevate) the link security for @chan.  LE links
 * are handled by SMP; BR/EDR links translate the channel's security
 * level into an HCI authentication requirement.  @initiator tells the
 * security layer which side started the procedure.
 */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
	struct l2cap_conn *conn = chan->conn;
	__u8 auth_type;

	if (conn->hcon->type == LE_LINK)
		return smp_conn_security(conn->hcon, chan->sec_level);

	auth_type = l2cap_get_auth_type(chan);

	return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
				 initiator);
}
818
/* Allocate the next signalling command identifier for @conn.
 * Serialized with ident_lock; wraps within the kernel-owned 1..128
 * range (identifier 0 is never used on the wire).
 */
static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	mutex_lock(&conn->ident_lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	mutex_unlock(&conn->ident_lock);

	return id;
}
840
/* Build and transmit an L2CAP signalling command on @conn.  Allocation
 * failure in l2cap_build_cmd() is silently dropped; signalling traffic
 * is sent at maximum priority and forces the link active.
 */
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
			   void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use NO_FLUSH if supported or we have an LE link (which does
	 * not support auto-flushing packets) */
	if (lmp_no_flush_capable(conn->hcon->hdev) ||
	    conn->hcon->type == LE_LINK)
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
	skb->priority = HCI_PRIO_MAX;

	hci_send_acl(conn->hchan, skb, flags);
}
865
Mat Martineau02b0fbb2012-10-23 15:24:10 -0700866static bool __chan_is_moving(struct l2cap_chan *chan)
867{
868 return chan->move_state != L2CAP_MOVE_STABLE &&
869 chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
870}
871
/* Transmit a data skb for @chan, routing it either over the AMP
 * high-speed channel (when one exists and no move is in progress) or
 * over the primary BR/EDR/LE ACL link with appropriate flush flags.
 * Consumes @skb in all paths.
 */
static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct hci_conn *hcon = chan->conn->hcon;
	u16 flags;

	BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
	       skb->priority);

	if (chan->hs_hcon && !__chan_is_moving(chan)) {
		if (chan->hs_hchan)
			hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
		else
			/* AMP link exists but no logical channel yet: drop */
			kfree_skb(skb);

		return;
	}

	/* Use NO_FLUSH for LE links (where this is the only option) or
	 * if the BR/EDR link supports it and flushing has not been
	 * explicitly requested (through FLAG_FLUSHABLE).
	 */
	if (hcon->type == LE_LINK ||
	    (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
	     lmp_no_flush_capable(hcon->hdev)))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
	hci_send_acl(chan->conn->hchan, skb, flags);
}
903
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700904static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
905{
906 control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
907 control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;
908
909 if (enh & L2CAP_CTRL_FRAME_TYPE) {
910 /* S-Frame */
911 control->sframe = 1;
912 control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
913 control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;
914
915 control->sar = 0;
916 control->txseq = 0;
917 } else {
918 /* I-Frame */
919 control->sframe = 0;
920 control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
921 control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;
922
923 control->poll = 0;
924 control->super = 0;
925 }
926}
927
928static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
929{
930 control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
931 control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;
932
933 if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
934 /* S-Frame */
935 control->sframe = 1;
936 control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
937 control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;
938
939 control->sar = 0;
940 control->txseq = 0;
941 } else {
942 /* I-Frame */
943 control->sframe = 0;
944 control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
945 control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;
946
947 control->poll = 0;
948 control->super = 0;
949 }
950}
951
952static inline void __unpack_control(struct l2cap_chan *chan,
953 struct sk_buff *skb)
954{
955 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
956 __unpack_extended_control(get_unaligned_le32(skb->data),
Johan Hedberga4368ff2015-03-30 23:21:01 +0300957 &bt_cb(skb)->l2cap);
Mat Martineaucec8ab6e2012-05-17 20:53:36 -0700958 skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700959 } else {
960 __unpack_enhanced_control(get_unaligned_le16(skb->data),
Johan Hedberga4368ff2015-03-30 23:21:01 +0300961 &bt_cb(skb)->l2cap);
Mat Martineaucec8ab6e2012-05-17 20:53:36 -0700962 skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
Mat Martineaub5c6aae2012-04-25 16:36:15 -0700963 }
964}
965
966static u32 __pack_extended_control(struct l2cap_ctrl *control)
967{
968 u32 packed;
969
970 packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
971 packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;
972
973 if (control->sframe) {
974 packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
975 packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
976 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
977 } else {
978 packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
979 packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
980 }
981
982 return packed;
983}
984
985static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
986{
987 u16 packed;
988
989 packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
990 packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;
991
992 if (control->sframe) {
993 packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
994 packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
995 packed |= L2CAP_CTRL_FRAME_TYPE;
996 } else {
997 packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
998 packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
999 }
1000
1001 return packed;
1002}
1003
1004static inline void __pack_control(struct l2cap_chan *chan,
1005 struct l2cap_ctrl *control,
1006 struct sk_buff *skb)
1007{
1008 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1009 put_unaligned_le32(__pack_extended_control(control),
1010 skb->data + L2CAP_HDR_SIZE);
1011 } else {
1012 put_unaligned_le16(__pack_enhanced_control(control),
1013 skb->data + L2CAP_HDR_SIZE);
1014 }
1015}
1016
Gustavo Padovanba7aa642012-05-29 13:29:16 -03001017static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
1018{
1019 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1020 return L2CAP_EXT_HDR_SIZE;
1021 else
1022 return L2CAP_ENH_HDR_SIZE;
1023}
1024
/* Allocate and build a supervisory (S-Frame) PDU for @chan carrying
 * the already-packed @control field.  Appends a CRC16 FCS when the
 * channel uses it.  Returns the skb or ERR_PTR(-ENOMEM).
 */
static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
					       u32 control)
{
	struct sk_buff *skb;
	struct l2cap_hdr *lh;
	int hlen = __ertm_hdr_size(chan);

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	skb = bt_skb_alloc(hlen, GFP_KERNEL);

	if (!skb)
		return ERR_PTR(-ENOMEM);

	/* Basic L2CAP header: payload length and destination CID */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (chan->fcs == L2CAP_FCS_CRC16) {
		/* FCS covers everything built so far, header included */
		u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
		put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
	}

	skb->priority = HCI_PRIO_MAX;
	return skb;
}
1057
/* Send an S-Frame described by @control on @chan, updating the ERTM
 * connection state on the way: piggy-backs a pending F-bit, tracks
 * RNR-sent status, and for RR/RNR frames records the acked sequence
 * and cancels the ack timer.  No-op while an AMP move is in progress
 * or if @control does not actually describe an S-Frame.
 */
static void l2cap_send_sframe(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("chan %p, control %p", chan, control);

	if (!control->sframe)
		return;

	if (__chan_is_moving(chan))
		return;

	/* A queued F-bit is sent on the next non-poll S-Frame */
	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
	    !control->poll)
		control->final = 1;

	if (control->super == L2CAP_SUPER_RR)
		clear_bit(CONN_RNR_SENT, &chan->conn_state);
	else if (control->super == L2CAP_SUPER_RNR)
		set_bit(CONN_RNR_SENT, &chan->conn_state);

	if (control->super != L2CAP_SUPER_SREJ) {
		/* This frame acknowledges everything up to reqseq */
		chan->last_acked_seq = control->reqseq;
		__clear_ack_timer(chan);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
	       control->final, control->poll, control->super);

	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(chan, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(chan, skb);
}
1098
/* Send a Receiver Ready or, when the local side is busy, a Receiver
 * Not Ready S-Frame acknowledging everything up to buffer_seq.
 * @poll sets the P-bit on the frame.
 */
static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p, poll %d", chan, poll);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.poll = poll;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
		control.super = L2CAP_SUPER_RNR;
	else
		control.super = L2CAP_SUPER_RR;

	control.reqseq = chan->buffer_seq;
	l2cap_send_sframe(chan, &control);
}
1117
Gustavo F. Padovanb4450032011-04-12 18:15:09 -03001118static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +03001119{
Johan Hedberg5ff6f342014-08-07 22:56:43 +03001120 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
1121 return true;
1122
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001123 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +03001124}
1125
/* Decide whether @chan should attempt an AMP (high-speed) channel:
 * both sides must advertise A2MP support, at least one non-BR/EDR
 * controller must be up, and the channel policy must prefer AMP.
 */
static bool __amp_capable(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct hci_dev *hdev;
	bool amp_available = false;

	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return false;

	if (!(conn->remote_fixed_chan & L2CAP_FC_A2MP))
		return false;

	/* Look for any powered-up AMP controller */
	read_lock(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (hdev->amp_type != AMP_TYPE_BREDR &&
		    test_bit(HCI_UP, &hdev->flags)) {
			amp_available = true;
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);

	if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
		return amp_available;

	return false;
}
1153
/* Validate the Extended Flow Specification parameters for @chan.
 * NOTE(review): currently a stub that accepts everything — real
 * validation appears to be unimplemented; confirm before relying on
 * EFS parameter checking here.
 */
static bool l2cap_check_efs(struct l2cap_chan *chan)
{
	/* Check EFS parameters */
	return true;
}
1159
/* Send an L2CAP Connection Request for @chan, allocating a fresh
 * command ident and marking the connect as pending so it is not
 * retried until the response (or a failure) arrives.
 */
void l2cap_send_conn_req(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_conn_req req;

	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;

	chan->ident = l2cap_get_ident(conn);

	set_bit(CONF_CONNECT_PEND, &chan->conf_state);

	l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}
1174
/* Send an A2MP Create Channel Request for @chan targeting the AMP
 * controller identified by @amp_id, using a freshly allocated ident.
 */
static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(chan->scid);
	req.psm = chan->psm;
	req.amp_id = amp_id;

	chan->ident = l2cap_get_ident(chan->conn);

	l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
		       sizeof(req), &req);
}
1187
/* Prepare an ERTM channel for an AMP channel move: stop all ERTM
 * timers, reset retransmission bookkeeping, and park TX/RX in the
 * move-specific states until the move completes.  No-op for non-ERTM
 * channels.
 */
static void l2cap_move_setup(struct l2cap_chan *chan)
{
	struct sk_buff *skb;

	BT_DBG("chan %p", chan);

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	__clear_retrans_timer(chan);
	__clear_monitor_timer(chan);
	__clear_ack_timer(chan);

	chan->retry_count = 0;
	/* Reset the retry count of every already-transmitted frame to 1;
	 * the walk stops at the first frame that has never been sent
	 * (retries == 0), since the queue is in transmission order.
	 */
	skb_queue_walk(&chan->tx_q, skb) {
		if (bt_cb(skb)->l2cap.retries)
			bt_cb(skb)->l2cap.retries = 1;
		else
			break;
	}

	chan->expected_tx_seq = chan->buffer_seq;

	clear_bit(CONN_REJ_ACT, &chan->conn_state);
	clear_bit(CONN_SREJ_ACT, &chan->conn_state);
	l2cap_seq_list_clear(&chan->retrans_list);
	l2cap_seq_list_clear(&chan->srej_list);
	skb_queue_purge(&chan->srej_q);

	chan->tx_state = L2CAP_TX_STATE_XMIT;
	chan->rx_state = L2CAP_RX_STATE_MOVE;

	/* Hold off transmissions until the move finishes */
	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}
1222
/* Finish an AMP channel move: return the move state machine to
 * STABLE and, for ERTM channels, resynchronize with the peer — the
 * initiator sends an explicit poll and waits for the F-bit, the
 * responder waits for the peer's poll.
 */
static void l2cap_move_done(struct l2cap_chan *chan)
{
	u8 move_role = chan->move_role;
	BT_DBG("chan %p", chan);

	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return;

	switch (move_role) {
	case L2CAP_MOVE_ROLE_INITIATOR:
		l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
		chan->rx_state = L2CAP_RX_STATE_WAIT_F;
		break;
	case L2CAP_MOVE_ROLE_RESPONDER:
		chan->rx_state = L2CAP_RX_STATE_WAIT_P;
		break;
	}
}
1244
/* Transition @chan to BT_CONNECTED: clear configuration state and the
 * channel timer, suspend LE flow-control channels that start with no
 * TX credits, and notify the channel owner via ops->ready().
 */
static void l2cap_chan_ready(struct l2cap_chan *chan)
{
	/* The channel may have already been flagged as connected in
	 * case of receiving data before the L2CAP info req/rsp
	 * procedure is complete.
	 */
	if (chan->state == BT_CONNECTED)
		return;

	/* This clears all conf flags, including CONF_NOT_COMPLETE */
	chan->conf_state = 0;
	__clear_chan_timer(chan);

	if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
		chan->ops->suspend(chan);

	chan->state = BT_CONNECTED;

	chan->ops->ready(chan);
}
1265
/* Send an LE Credit Based Connection Request for @chan, advertising
 * our MTU, MPS and initial RX credits.  FLAG_LE_CONN_REQ_SENT
 * guarantees the request goes out at most once per connection attempt.
 */
static void l2cap_le_connect(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_le_conn_req req;

	if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
		return;

	req.psm = chan->psm;
	req.scid = cpu_to_le16(chan->scid);
	req.mtu = cpu_to_le16(chan->imtu);
	req.mps = cpu_to_le16(chan->mps);
	req.credits = cpu_to_le16(chan->rx_credits);

	chan->ident = l2cap_get_ident(conn);

	l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
		       sizeof(req), &req);
}
1285
/* Start an LE channel once link security is satisfied: fixed channels
 * (no PSM) become ready immediately, while connection-oriented
 * channels in BT_CONNECT send the LE connect request.
 */
static void l2cap_le_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (!smp_conn_security(conn->hcon, chan->sec_level))
		return;

	if (!chan->psm) {
		l2cap_chan_ready(chan);
		return;
	}

	if (chan->state == BT_CONNECT)
		l2cap_le_connect(chan);
}
1301
/* Kick off the channel connection using the appropriate transport:
 * AMP discovery when the channel prefers and can use an AMP
 * controller, the LE path for LE links, or a plain BR/EDR connect
 * request otherwise.
 */
static void l2cap_start_connection(struct l2cap_chan *chan)
{
	if (__amp_capable(chan)) {
		BT_DBG("chan %p AMP capable: discover AMPs", chan);
		a2mp_discover_amp(chan);
	} else if (chan->conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
	} else {
		l2cap_send_conn_req(chan);
	}
}
1313
/* Send the initial L2CAP Information Request (feature mask) for
 * @conn, at most once, and arm the info timer so a silent peer does
 * not stall channel setup indefinitely.
 */
static void l2cap_request_info(struct l2cap_conn *conn)
{
	struct l2cap_info_req req;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		return;

	req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
	conn->info_ident = l2cap_get_ident(conn);

	schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

	l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
		       sizeof(req), &req);
}
1331
/* Drive channel setup for @chan: LE links go straight to the LE path;
 * BR/EDR links first complete the feature-mask information exchange,
 * then start the connection once security is satisfied and no connect
 * request is already pending.
 */
static void l2cap_do_start(struct l2cap_chan *chan)
{
	struct l2cap_conn *conn = chan->conn;

	if (conn->hcon->type == LE_LINK) {
		l2cap_le_start(chan);
		return;
	}

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)) {
		l2cap_request_info(conn);
		return;
	}

	/* Wait for the info response before connecting */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		return;

	if (l2cap_chan_check_security(chan, true) &&
	    __l2cap_no_conn_pending(chan))
		l2cap_start_connection(chan);
}
1353
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001354static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1355{
1356 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03001357 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001358 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1359
1360 switch (mode) {
1361 case L2CAP_MODE_ERTM:
1362 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1363 case L2CAP_MODE_STREAMING:
1364 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1365 default:
1366 return 0x00;
1367 }
1368}
1369
/* Send an L2CAP Disconnection Request for @chan and move it to
 * BT_DISCONN with @err recorded.  ERTM timers are stopped first on
 * connected ERTM channels; A2MP channels only change state since they
 * use their own teardown signalling.
 */
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
{
	struct l2cap_conn *conn = chan->conn;
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
		__clear_retrans_timer(chan);
		__clear_monitor_timer(chan);
		__clear_ack_timer(chan);
	}

	if (chan->scid == L2CAP_CID_A2MP) {
		l2cap_state_change(chan, BT_DISCONN);
		return;
	}

	req.dcid = cpu_to_le16(chan->dcid);
	req.scid = cpu_to_le16(chan->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
		       sizeof(req), &req);

	l2cap_state_change_and_error(chan, BT_DISCONN, err);
}
1396
Linus Torvalds1da177e2005-04-16 15:20:36 -07001397/* ---- L2CAP connections ---- */
/* Kick every channel on this connection forward in its connection
 * setup. Called once the link-level prerequisites are done (e.g. the
 * information request completed, see l2cap_info_timeout()).
 *
 * Holds conn->chan_lock across the walk and takes each channel lock
 * in turn; every continue path must unlock the channel first.
 */
static void l2cap_conn_start(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan, *tmp;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
		l2cap_chan_lock(chan);

		/* Connectionless channels have no setup handshake */
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			l2cap_chan_ready(chan);
			l2cap_chan_unlock(chan);
			continue;
		}

		if (chan->state == BT_CONNECT) {
			/* Outgoing side: wait until security is in place and
			 * no connect request is already pending.
			 */
			if (!l2cap_chan_check_security(chan, true) ||
			    !__l2cap_no_conn_pending(chan)) {
				l2cap_chan_unlock(chan);
				continue;
			}

			/* Close the channel if the required mode is not
			 * supported by the remote and the device insists
			 * on it (CONF_STATE2_DEVICE).
			 */
			if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
			    && test_bit(CONF_STATE2_DEVICE,
					&chan->conf_state)) {
				l2cap_chan_close(chan, ECONNRESET);
				l2cap_chan_unlock(chan);
				continue;
			}

			l2cap_start_connection(chan);

		} else if (chan->state == BT_CONNECT2) {
			/* Incoming side: answer the pending connect request.
			 * Note the swapped cid fields: our dcid is the
			 * remote's scid and vice versa.
			 */
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(chan->dcid);
			rsp.dcid = cpu_to_le16(chan->scid);

			if (l2cap_chan_check_security(chan, false)) {
				if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
					/* Defer the accept decision to the
					 * channel owner.
					 */
					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
					chan->ops->defer(chan);

				} else {
					l2cap_state_change(chan, BT_CONFIG);
					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
				}
			} else {
				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
			}

			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
				       sizeof(rsp), &rsp);

			/* Only a successful response that has not yet been
			 * followed by a config request proceeds to config.
			 */
			if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
			    rsp.result != L2CAP_CR_SUCCESS) {
				l2cap_chan_unlock(chan);
				continue;
			}

			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);
}
1474
/* LE-specific part of connection-ready handling: trigger security for
 * outgoing pairing and, as slave, request a connection parameter
 * update when the current interval is out of the configured range.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct hci_conn *hcon = conn->hcon;
	struct hci_dev *hdev = hcon->hdev;

	BT_DBG("%s conn %p", hdev->name, conn);

	/* For outgoing pairing which doesn't necessarily have an
	 * associated socket (e.g. mgmt_pair_device).
	 */
	if (hcon->out)
		smp_conn_security(hcon, hcon->pending_sec_level);

	/* For LE slave connections, make sure the connection interval
	 * is in the range of the minimum and maximum interval that has
	 * been configured for this connection. If not, then trigger
	 * the connection update procedure.
	 */
	if (hcon->role == HCI_ROLE_SLAVE &&
	    (hcon->le_conn_interval < hcon->le_conn_min_interval ||
	     hcon->le_conn_interval > hcon->le_conn_max_interval)) {
		struct l2cap_conn_param_update_req req;

		req.min = cpu_to_le16(hcon->le_conn_min_interval);
		req.max = cpu_to_le16(hcon->le_conn_max_interval);
		req.latency = cpu_to_le16(hcon->le_conn_latency);
		req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);

		l2cap_send_cmd(conn, l2cap_get_ident(conn),
			       L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
	}
}
1507
/* The underlying link is up: start or complete setup of every channel
 * on this connection, then release any received frames queued while
 * the connection was being established (pending_rx_work).
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan *chan;
	struct hci_conn *hcon = conn->hcon;

	BT_DBG("conn %p", conn);

	/* On BR/EDR, query the remote's feature mask first */
	if (hcon->type == ACL_LINK)
		l2cap_request_info(conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {

		l2cap_chan_lock(chan);

		/* A2MP channels have their own setup path */
		if (chan->scid == L2CAP_CID_A2MP) {
			l2cap_chan_unlock(chan);
			continue;
		}

		if (hcon->type == LE_LINK) {
			l2cap_le_start(chan);
		} else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			/* Connectionless channels are ready as soon as the
			 * information exchange has completed.
			 */
			if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
				l2cap_chan_ready(chan);
		} else if (chan->state == BT_CONNECT) {
			l2cap_do_start(chan);
		}

		l2cap_chan_unlock(chan);
	}

	mutex_unlock(&conn->chan_lock);

	/* Must run after the channel walk, outside chan_lock */
	if (hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
}
1548
/* Notify sockets that we cannot guarantee reliability anymore.
 * Only channels that asked for forced reliability (FLAG_FORCE_RELIABLE)
 * get the error; everyone else keeps going.
 */
static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
{
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
			l2cap_chan_set_err(chan, err);
	}

	mutex_unlock(&conn->chan_lock);
}
1565
/* Deferred-work handler for the information request timer: give up on
 * the feature-mask exchange, mark it done, and proceed with channel
 * setup anyway.
 */
static void l2cap_info_timeout(struct work_struct *work)
{
	struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
					       info_timer.work);

	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
	conn->info_ident = 0;

	l2cap_conn_start(conn);
}
1576
David Herrmann2c8e1412013-04-06 20:28:45 +02001577/*
1578 * l2cap_user
1579 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1580 * callback is called during registration. The ->remove callback is called
1581 * during unregistration.
1582 * An l2cap_user object can either be explicitly unregistered or when the
1583 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1584 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1585 * External modules must own a reference to the l2cap_conn object if they intend
1586 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1587 * any time if they don't.
1588 */
1589
/* Register an external l2cap_user on @conn. Returns 0 on success,
 * -EINVAL if @user is already registered (its list node is non-empty),
 * -ENODEV if the connection is already being torn down, or the error
 * returned by the user's ->probe callback.
 */
int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;
	int ret;

	/* We need to check whether l2cap_conn is registered. If it is not, we
	 * must not register the l2cap_user. l2cap_conn_del() unregisters
	 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
	 * relies on the parent hci_conn object to be locked. This itself relies
	 * on the hci_dev object to be locked. So we must lock the hci device
	 * here, too. */

	hci_dev_lock(hdev);

	if (!list_empty(&user->list)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	/* conn->hchan is NULL after l2cap_conn_del() was called */
	if (!conn->hchan) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = user->probe(conn, user);
	if (ret)
		goto out_unlock;

	list_add(&user->list, &conn->users);
	ret = 0;

out_unlock:
	hci_dev_unlock(hdev);
	return ret;
}
EXPORT_SYMBOL(l2cap_register_user);
1627
/* Unregister an l2cap_user previously registered with
 * l2cap_register_user(). A no-op if it is not currently on the list.
 * Invokes the user's ->remove callback under the hci_dev lock.
 */
void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
{
	struct hci_dev *hdev = conn->hcon->hdev;

	hci_dev_lock(hdev);

	if (list_empty(&user->list))
		goto out_unlock;

	/* list_del_init() so a later re-registration sees an empty node */
	list_del_init(&user->list);
	user->remove(conn, user);

out_unlock:
	hci_dev_unlock(hdev);
}
EXPORT_SYMBOL(l2cap_unregister_user);
1644
1645static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1646{
1647 struct l2cap_user *user;
1648
1649 while (!list_empty(&conn->users)) {
1650 user = list_first_entry(&conn->users, struct l2cap_user, list);
Tedd Ho-Jeong Anab944c82015-06-30 11:43:40 -07001651 list_del_init(&user->list);
David Herrmann2c8e1412013-04-06 20:28:45 +02001652 user->remove(conn, user);
1653 }
1654}
1655
/* Tear down the l2cap_conn attached to @hcon: stop deferred RX work,
 * notify external users, kill every channel with @err, and drop the
 * connection's reference. The ordering here matters: pending work is
 * cancelled before channels die, and conn->hchan is cleared last so
 * l2cap_register_user() can detect the teardown.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct l2cap_chan *chan, *l;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	kfree_skb(conn->rx_skb);

	skb_queue_purge(&conn->pending_rx);

	/* We can not call flush_work(&conn->pending_rx_work) here since we
	 * might block if we are running on a worker from the same workqueue
	 * pending_rx_work is waiting on.
	 */
	if (work_pending(&conn->pending_rx_work))
		cancel_work_sync(&conn->pending_rx_work);

	if (work_pending(&conn->id_addr_update_work))
		cancel_work_sync(&conn->id_addr_update_work);

	l2cap_unregister_all_users(conn);

	/* Force the connection to be immediately dropped */
	hcon->disc_timeout = 0;

	mutex_lock(&conn->chan_lock);

	/* Kill channels */
	list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
		/* Hold the channel so ->close and the final put below
		 * operate on a live object even after l2cap_chan_del().
		 */
		l2cap_chan_hold(chan);
		l2cap_chan_lock(chan);

		l2cap_chan_del(chan, err);

		l2cap_chan_unlock(chan);

		chan->ops->close(chan);
		l2cap_chan_put(chan);
	}

	mutex_unlock(&conn->chan_lock);

	hci_chan_del(conn->hchan);

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
		cancel_delayed_work_sync(&conn->info_timer);

	hcon->l2cap_data = NULL;
	/* NULL hchan marks the conn as unregistered (see l2cap_register_user) */
	conn->hchan = NULL;
	l2cap_conn_put(conn);
}
1711
/* kref release callback: drop the hci_conn reference held by this
 * l2cap_conn and free the structure. Only invoked via l2cap_conn_put().
 */
static void l2cap_conn_free(struct kref *ref)
{
	struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);

	hci_conn_put(conn->hcon);
	kfree(conn);
}
1719
/* Take a reference on @conn and return it, so callers can write
 * e.g. foo->conn = l2cap_conn_get(conn);
 */
struct l2cap_conn *l2cap_conn_get(struct l2cap_conn *conn)
{
	kref_get(&conn->ref);
	return conn;
}
EXPORT_SYMBOL(l2cap_conn_get);
/* Drop a reference on @conn; l2cap_conn_free() runs when the last
 * reference goes away.
 */
void l2cap_conn_put(struct l2cap_conn *conn)
{
	kref_put(&conn->ref, l2cap_conn_free);
}
EXPORT_SYMBOL(l2cap_conn_put);
1732
Linus Torvalds1da177e2005-04-16 15:20:36 -07001733/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001734
Ido Yarivc2287682012-04-20 15:46:07 -03001735/* Find socket with psm and source / destination bdaddr.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001736 * Returns closest match.
1737 */
Ido Yarivc2287682012-04-20 15:46:07 -03001738static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1739 bdaddr_t *src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001740 bdaddr_t *dst,
1741 u8 link_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001742{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001743 struct l2cap_chan *c, *c1 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001744
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001745 read_lock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001746
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001747 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001748 if (state && c->state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 continue;
1750
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001751 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1752 continue;
1753
1754 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1755 continue;
1756
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001757 if (c->psm == psm) {
Ido Yarivc2287682012-04-20 15:46:07 -03001758 int src_match, dst_match;
1759 int src_any, dst_any;
1760
Linus Torvalds1da177e2005-04-16 15:20:36 -07001761 /* Exact match. */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001762 src_match = !bacmp(&c->src, src);
1763 dst_match = !bacmp(&c->dst, dst);
Ido Yarivc2287682012-04-20 15:46:07 -03001764 if (src_match && dst_match) {
Johan Hedberga24cce12014-08-07 22:56:42 +03001765 l2cap_chan_hold(c);
Johannes Berga7567b22011-06-01 08:29:54 +02001766 read_unlock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001767 return c;
1768 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001769
1770 /* Closest match */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001771 src_any = !bacmp(&c->src, BDADDR_ANY);
1772 dst_any = !bacmp(&c->dst, BDADDR_ANY);
Ido Yarivc2287682012-04-20 15:46:07 -03001773 if ((src_match && dst_any) || (src_any && dst_match) ||
1774 (src_any && dst_any))
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001775 c1 = c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 }
1777 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001778
Johan Hedberga24cce12014-08-07 22:56:42 +03001779 if (c1)
1780 l2cap_chan_hold(c1);
1781
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001782 read_unlock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001783
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001784 return c1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001785}
1786
/* ERTM monitor timer expiry (deferred work): feed L2CAP_EV_MONITOR_TO
 * into the channel's TX state machine. Drops the timer's channel
 * reference on every exit path; bails out if the channel has already
 * been detached from its connection.
 */
static void l2cap_monitor_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       monitor_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1807
/* ERTM retransmission timer expiry (deferred work): feed
 * L2CAP_EV_RETRANS_TO into the channel's TX state machine. Mirrors
 * l2cap_monitor_timeout(), including the reference drop on all paths.
 */
static void l2cap_retrans_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       retrans_timer.work);

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	if (!chan->conn) {
		l2cap_chan_unlock(chan);
		l2cap_chan_put(chan);
		return;
	}

	l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
1827
/* Transmit the frames in @skbs in streaming mode: append them to the
 * channel's TX queue, then dequeue and send each one with a sequence
 * number and optional FCS. No acknowledgements or retransmission in
 * this mode. Skipped entirely while an AMP channel move is in progress.
 */
static void l2cap_streaming_send(struct l2cap_chan *chan,
				 struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_ctrl *control;

	BT_DBG("chan %p, skbs %p", chan, skbs);

	if (__chan_is_moving(chan))
		return;

	skb_queue_splice_tail_init(skbs, &chan->tx_q);

	while (!skb_queue_empty(&chan->tx_q)) {

		skb = skb_dequeue(&chan->tx_q);

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		control->reqseq = 0;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		/* FCS covers the frame as built so far and is appended last */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		l2cap_do_send(chan, skb);

		BT_DBG("Sent txseq %u", control->txseq);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->frames_sent++;
	}
}
1866
/* Transmit queued I-frames in ERTM mode, starting at tx_send_head and
 * stopping when the queue is empty, the remote TX window is full, or
 * the TX state machine leaves the XMIT state. Returns the number of
 * frames sent, 0 when sending is currently not possible (remote busy
 * or channel move in progress), or -ENOTCONN.
 */
static int l2cap_ertm_send(struct l2cap_chan *chan)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_ctrl *control;
	int sent = 0;

	BT_DBG("chan %p", chan);

	if (chan->state != BT_CONNECTED)
		return -ENOTCONN;

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return 0;

	if (__chan_is_moving(chan))
		return 0;

	while (chan->tx_send_head &&
	       chan->unacked_frames < chan->remote_tx_win &&
	       chan->tx_state == L2CAP_TX_STATE_XMIT) {

		skb = chan->tx_send_head;

		bt_cb(skb)->l2cap.retries = 1;
		control = &bt_cb(skb)->l2cap;

		/* Piggy-back a pending F-bit on this frame */
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control->final = 1;

		/* Each I-frame also acknowledges everything received so far */
		control->reqseq = chan->buffer_seq;
		chan->last_acked_seq = chan->buffer_seq;
		control->txseq = chan->next_tx_seq;

		__pack_control(chan, control, skb);

		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
			put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
		}

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_KERNEL);

		if (!tx_skb)
			break;

		__set_retrans_timer(chan);

		chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
		chan->unacked_frames++;
		chan->frames_sent++;
		sent++;

		/* Advance tx_send_head; the original skb stays queued for
		 * possible retransmission.
		 */
		if (skb_queue_is_last(&chan->tx_q, skb))
			chan->tx_send_head = NULL;
		else
			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

		l2cap_do_send(chan, tx_skb);
		BT_DBG("Sent txseq %u", control->txseq);
	}

	BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
	       chan->unacked_frames, skb_queue_len(&chan->tx_q));

	return sent;
}
1936
/* Retransmit every sequence number queued on chan->retrans_list.
 * Enforces the channel's max_tx retry limit (disconnecting on
 * violation), re-packs the control field of each frame, recomputes the
 * FCS, and sends a fresh clone/copy. Skipped while the remote is busy
 * or a channel move is in progress.
 */
static void l2cap_ertm_resend(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	struct sk_buff *skb;
	struct sk_buff *tx_skb;
	u16 seq;

	BT_DBG("chan %p", chan);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (__chan_is_moving(chan))
		return;

	while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
		seq = l2cap_seq_list_pop(&chan->retrans_list);

		skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
		if (!skb) {
			BT_DBG("Error: Can't retransmit seq %d, frame missing",
			       seq);
			continue;
		}

		bt_cb(skb)->l2cap.retries++;
		control = bt_cb(skb)->l2cap;

		/* max_tx == 0 means unlimited retransmissions */
		if (chan->max_tx != 0 &&
		    bt_cb(skb)->l2cap.retries > chan->max_tx) {
			BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
			l2cap_send_disconn_req(chan, ECONNRESET);
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		control.reqseq = chan->buffer_seq;
		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
			control.final = 1;
		else
			control.final = 0;

		if (skb_cloned(skb)) {
			/* Cloned sk_buffs are read-only, so we need a
			 * writeable copy
			 */
			tx_skb = skb_copy(skb, GFP_KERNEL);
		} else {
			tx_skb = skb_clone(skb, GFP_KERNEL);
		}

		if (!tx_skb) {
			l2cap_seq_list_clear(&chan->retrans_list);
			break;
		}

		/* Update skb contents */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
			put_unaligned_le32(__pack_extended_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(&control),
					   tx_skb->data + L2CAP_HDR_SIZE);
		}

		/* Update FCS */
		if (chan->fcs == L2CAP_FCS_CRC16) {
			u16 fcs = crc16(0, (u8 *) tx_skb->data,
					tx_skb->len - L2CAP_FCS_SIZE);
			put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
					   L2CAP_FCS_SIZE);
		}

		l2cap_do_send(chan, tx_skb);

		BT_DBG("Resent txseq %d", control.txseq);

		chan->last_acked_seq = chan->buffer_seq;
	}
}
2017
/* Retransmit the single frame identified by control->reqseq: queue
 * that sequence number and run the resend machinery.
 */
static void l2cap_retransmit(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);

	l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
	l2cap_ertm_resend(chan);
}
2026
/* Retransmit all unacknowledged frames starting at control->reqseq:
 * rebuild retrans_list from the TX queue (from reqseq up to but not
 * including tx_send_head) and run the resend machinery. A set poll
 * flag requests an F-bit on the next outgoing frame.
 */
static void l2cap_retransmit_all(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	if (control->poll)
		set_bit(CONN_SEND_FBIT, &chan->conn_state);

	l2cap_seq_list_clear(&chan->retrans_list);

	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
		return;

	if (chan->unacked_frames) {
		/* Position skb on the first frame to retransmit */
		skb_queue_walk(&chan->tx_q, skb) {
			if (bt_cb(skb)->l2cap.txseq == control->reqseq ||
			    skb == chan->tx_send_head)
				break;
		}

		/* Queue everything from there up to tx_send_head */
		skb_queue_walk_from(&chan->tx_q, skb) {
			if (skb == chan->tx_send_head)
				break;

			l2cap_seq_list_append(&chan->retrans_list,
					      bt_cb(skb)->l2cap.txseq);
		}

		l2cap_ertm_resend(chan);
	}
}
2060
/* Acknowledge received I-frames. Sends an RNR S-frame when we are
 * locally busy; otherwise tries to piggy-back the ack on outgoing
 * I-frames, sends an explicit RR once the un-acked window is 3/4
 * full, or re-arms the ack timer to batch a later ack.
 */
static void l2cap_send_ack(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;
	u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
					 chan->last_acked_seq);
	int threshold;

	BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
	       chan, chan->last_acked_seq, chan->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    chan->rx_state == L2CAP_RX_STATE_RECV) {
		/* Locally busy: tell the remote to stop sending */
		__clear_ack_timer(chan);
		control.super = L2CAP_SUPER_RNR;
		control.reqseq = chan->buffer_seq;
		l2cap_send_sframe(chan, &control);
	} else {
		if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
			l2cap_ertm_send(chan);
			/* If any i-frames were sent, they included an ack */
			if (chan->buffer_seq == chan->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = chan->ack_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
		       threshold);

		if (frames_to_ack >= threshold) {
			__clear_ack_timer(chan);
			control.super = L2CAP_SUPER_RR;
			control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &control);
			frames_to_ack = 0;
		}

		if (frames_to_ack)
			__set_ack_timer(chan);
	}
}
2110
/* Copy len bytes of user data from msg into skb.  The first count bytes
 * go into skb itself; the remainder is split into continuation fragments
 * (no L2CAP header) sized to the connection MTU and linked on skb's
 * frag_list.
 *
 * Returns the number of bytes copied, or a negative errno (-EFAULT on a
 * failed copy, or the allocator's error).  Fragments already linked into
 * skb before a failure are freed by the caller's kfree_skb(skb).
 */
static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
					 struct msghdr *msg, int len,
					 int count, struct sk_buff *skb)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff **frag;
	int sent = 0;

	if (copy_from_iter(skb_put(skb, count), count, &msg->msg_iter) != count)
		return -EFAULT;

	sent += count;
	len -= count;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		struct sk_buff *tmp;

		count = min_t(unsigned int, conn->mtu, len);

		tmp = chan->ops->alloc_skb(chan, 0, count,
					   msg->msg_flags & MSG_DONTWAIT);
		if (IS_ERR(tmp))
			return PTR_ERR(tmp);

		*frag = tmp;

		if (copy_from_iter(skb_put(*frag, count), count,
				   &msg->msg_iter) != count)
			return -EFAULT;

		sent += count;
		len -= count;

		/* Account the fragment's bytes on the head skb */
		skb->len += (*frag)->len;
		skb->data_len += (*frag)->len;

		frag = &(*frag)->next;
	}

	return sent;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002154
/* Build a connectionless-channel PDU: L2CAP basic header followed by
 * the 2-byte PSM, then the user data.  The header's len field covers
 * the PSM plus the payload.
 *
 * Returns the skb (with continuation fragments if len exceeds the
 * connection MTU) or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
						 struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
	       __le16_to_cpu(chan->psm), len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
	put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2186
/* Build a basic-mode PDU: plain L2CAP header followed by the user data.
 *
 * Returns the skb (with continuation fragments if len exceeds the
 * connection MTU) or an ERR_PTR on allocation/copy failure.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
					      struct msghdr *msg, size_t len)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);

	skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len);

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}
	return skb;
}
2216
/* Build an ERTM/streaming I-frame PDU.
 *
 * The header is sized via __ertm_hdr_size() (enhanced or extended
 * control field), plus an SDU-length field when sdulen is non-zero
 * (i.e. this is a SAR start fragment), plus room for the FCS when the
 * channel uses CRC16.  The control field is zeroed here and populated
 * later, at transmit time; the FCS is likewise appended later.
 *
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
					       struct msghdr *msg, size_t len,
					       u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = __ertm_hdr_size(chan);

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		hlen += L2CAP_FCS_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	/* Control header is populated later */
	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
		put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
	else
		put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	/* Stash per-frame metadata used by the ERTM tx machinery */
	bt_cb(skb)->l2cap.fcs = chan->fcs;
	bt_cb(skb)->l2cap.retries = 0;
	return skb;
}
2270
/* Segment an SDU into a queue of ERTM/streaming I-frame PDUs.
 *
 * The PDU payload size is bounded by the HCI MTU (and additionally by
 * L2CAP_BREDR_MAX_PAYLOAD for BR/EDR links, so each PDU fits one HCI
 * fragment), minus the largest possible L2CAP overhead, and by the
 * remote's advertised MPS.  A single-PDU SDU is marked UNSEGMENTED;
 * otherwise frames are tagged START / CONTINUE / END, with the total
 * SDU length carried only in the START frame.
 *
 * Returns 0 on success or a negative errno; on failure any PDUs already
 * built are purged from seg_queue.
 */
static int l2cap_segment_sdu(struct l2cap_chan *chan,
			     struct sk_buff_head *seg_queue,
			     struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	u8 sar;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = chan->conn->mtu;

	/* Constrain PDU size for BR/EDR connections */
	if (!chan->hs_hcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	if (chan->fcs)
		pdu_len -= L2CAP_FCS_SIZE;

	pdu_len -= __ertm_hdr_size(chan);

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, chan->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
	}

	while (len > 0) {
		skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->l2cap.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		/* Only the START frame carries the SDU length */
		if (sdu_len)
			sdu_len = 0;

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return 0;
}
2337
/* Build one LE credit-based flow-control PDU: L2CAP header, an optional
 * 2-byte SDU-length field (first PDU of an SDU only, when sdulen is
 * non-zero), then the data.
 *
 * Returns the skb or an ERR_PTR on failure.
 */
static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
						   struct msghdr *msg,
						   size_t len, u16 sdulen)
{
	struct l2cap_conn *conn = chan->conn;
	struct sk_buff *skb;
	int err, count, hlen;
	struct l2cap_hdr *lh;

	BT_DBG("chan %p len %zu", chan, len);

	if (!conn)
		return ERR_PTR(-ENOTCONN);

	hlen = L2CAP_HDR_SIZE;

	if (sdulen)
		hlen += L2CAP_SDULEN_SIZE;

	count = min_t(unsigned int, (conn->mtu - hlen), len);

	skb = chan->ops->alloc_skb(chan, hlen, count,
				   msg->msg_flags & MSG_DONTWAIT);
	if (IS_ERR(skb))
		return skb;

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(chan->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	if (sdulen)
		put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));

	err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
	if (unlikely(err < 0)) {
		kfree_skb(skb);
		return ERR_PTR(err);
	}

	return skb;
}
2380
/* Segment an SDU into LE credit-based flow-control PDUs.
 *
 * The first PDU carries the total SDU length and so has room for
 * remote_mps - L2CAP_SDULEN_SIZE bytes of data; subsequent PDUs omit
 * the SDU-length field, which is why pdu_len grows by L2CAP_SDULEN_SIZE
 * after the first iteration.
 *
 * Returns 0 on success or a negative errno; on failure any PDUs already
 * built are purged from seg_queue.
 */
static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
				struct sk_buff_head *seg_queue,
				struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	size_t pdu_len;
	u16 sdu_len;

	BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);

	sdu_len = len;
	pdu_len = chan->remote_mps - L2CAP_SDULEN_SIZE;

	while (len > 0) {
		if (len <= pdu_len)
			pdu_len = len;

		skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;

		if (sdu_len) {
			/* Only the first PDU carries the SDU length */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}
	}

	return 0;
}
2416
/* Send an SDU on a channel, building PDUs as required by the channel
 * mode (connectionless, LE flow-control, basic, ERTM or streaming).
 *
 * Returns the number of bytes accepted, or a negative errno:
 * -ENOTCONN if the channel has no connection or was closed while the
 * channel lock was temporarily dropped, -EMSGSIZE if len exceeds the
 * outgoing MTU, -EAGAIN if an LE flow-control channel has no tx
 * credits, -EBADFD for an unrecognized mode.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
{
	struct sk_buff *skb;
	int err;
	struct sk_buff_head seg_queue;

	if (!chan->conn)
		return -ENOTCONN;

	/* Connectionless channel */
	if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
		skb = l2cap_create_connless_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		return len;
	}

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		if (!chan->tx_credits)
			return -EAGAIN;

		__skb_queue_head_init(&seg_queue);

		err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);

		/* Channel lock may have been dropped during segmentation;
		 * recheck the state before queueing anything.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			return err;

		skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);

		/* Transmit while credits last; leftovers stay on tx_q */
		while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
			l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
			chan->tx_credits--;
		}

		if (!chan->tx_credits)
			chan->ops->suspend(chan);

		err = len;

		break;

	case L2CAP_MODE_BASIC:
		/* Check outgoing MTU */
		if (len > chan->omtu)
			return -EMSGSIZE;

		/* Create a basic PDU */
		skb = l2cap_create_basic_pdu(chan, msg, len);
		if (IS_ERR(skb))
			return PTR_ERR(skb);

		/* Channel lock is released before requesting new skb and then
		 * reacquired thus we need to recheck channel state.
		 */
		if (chan->state != BT_CONNECTED) {
			kfree_skb(skb);
			return -ENOTCONN;
		}

		l2cap_do_send(chan, skb);
		err = len;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Check outgoing MTU */
		if (len > chan->omtu) {
			err = -EMSGSIZE;
			break;
		}

		__skb_queue_head_init(&seg_queue);

		/* Do segmentation before calling in to the state machine,
		 * since it's possible to block while waiting for memory
		 * allocation.
		 */
		err = l2cap_segment_sdu(chan, &seg_queue, msg, len);

		/* The channel could have been closed while segmenting,
		 * check that it is still connected.
		 */
		if (chan->state != BT_CONNECTED) {
			__skb_queue_purge(&seg_queue);
			err = -ENOTCONN;
		}

		if (err)
			break;

		if (chan->mode == L2CAP_MODE_ERTM)
			l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
		else
			l2cap_streaming_send(chan, &seg_queue);

		err = len;

		/* If the skbs were not queued for sending, they'll still be in
		 * seg_queue and need to be purged.
		 */
		__skb_queue_purge(&seg_queue);
		break;

	default:
		BT_DBG("bad state %1.1x", chan->mode);
		err = -EBADFD;
	}

	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_send);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002549
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and txseq (the just-received out-of-order frame),
 * skipping frames already buffered in srej_q.  Each requested seq is
 * recorded in srej_list so the retransmissions can be matched up,
 * and expected_tx_seq is advanced past txseq.
 */
static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	for (seq = chan->expected_tx_seq; seq != txseq;
	     seq = __next_seq(chan, seq)) {
		if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
			control.reqseq = seq;
			l2cap_send_sframe(chan, &control);
			l2cap_seq_list_append(&chan->srej_list, seq);
		}
	}

	chan->expected_tx_seq = __next_seq(chan, txseq);
}
2572
2573static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2574{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002575 struct l2cap_ctrl control;
2576
2577 BT_DBG("chan %p", chan);
2578
2579 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2580 return;
2581
2582 memset(&control, 0, sizeof(control));
2583 control.sframe = 1;
2584 control.super = L2CAP_SUPER_SREJ;
2585 control.reqseq = chan->srej_list.tail;
2586 l2cap_send_sframe(chan, &control);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002587}
2588
/* Re-send SREJ S-frames for every outstanding missing frame except
 * txseq (which has just arrived).  Each popped seq other than txseq is
 * re-requested and re-appended to srej_list; capturing the initial list
 * head guarantees exactly one pass even though entries are re-added.
 */
static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
{
	struct l2cap_ctrl control;
	u16 initial_head;
	u16 seq;

	BT_DBG("chan %p, txseq %u", chan, txseq);

	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.super = L2CAP_SUPER_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = chan->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&chan->srej_list);
		if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
			break;

		control.reqseq = seq;
		l2cap_send_sframe(chan, &control);
		l2cap_seq_list_append(&chan->srej_list, seq);
	} while (chan->srej_list.head != initial_head);
}
2614
/* Process a received reqseq (cumulative acknowledgment): unlink and free
 * every tx-queue frame from expected_ack_seq up to (but not including)
 * reqseq, advance expected_ack_seq, and stop the retransmission timer
 * once nothing remains unacked.
 */
static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
{
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("chan %p, reqseq %u", chan, reqseq);

	/* Nothing outstanding, or this ack is a duplicate */
	if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %u, unacked_frames %u",
	       chan->expected_ack_seq, chan->unacked_frames);

	for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
	     ackseq = __next_seq(chan, ackseq)) {

		acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, &chan->tx_q);
			kfree_skb(acked_skb);
			chan->unacked_frames--;
		}
	}

	chan->expected_ack_seq = reqseq;

	if (chan->unacked_frames == 0)
		__clear_retrans_timer(chan);

	BT_DBG("unacked_frames %u", chan->unacked_frames);
}
2646
2647static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2648{
2649 BT_DBG("chan %p", chan);
2650
2651 chan->expected_tx_seq = chan->buffer_seq;
2652 l2cap_seq_list_clear(&chan->srej_list);
2653 skb_queue_purge(&chan->srej_q);
2654 chan->rx_state = L2CAP_RX_STATE_RECV;
2655}
2656
/* Handle a TX-side event while in the XMIT state (the normal
 * transmitting state of the ERTM tx state machine).
 */
static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
				struct l2cap_ctrl *control,
				struct sk_buff_head *skbs, u8 event)
{
	BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
	       event);

	switch (event) {
	case L2CAP_EV_DATA_REQUEST:
		/* Queue the new PDUs and transmit what the window allows */
		if (chan->tx_send_head == NULL)
			chan->tx_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, &chan->tx_q);
		l2cap_ertm_send(chan);
		break;
	case L2CAP_EV_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		set_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_abort_rx_srej_sent(chan);
		}

		/* Emits an RNR since LOCAL_BUSY is now set */
		l2cap_send_ack(chan);

		break;
	case L2CAP_EV_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);

		if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
			/* An RNR went out earlier; poll the peer with an RR
			 * and wait for the F-bit reply in WAIT_F.
			 */
			struct l2cap_ctrl local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.sframe = 1;
			local_control.super = L2CAP_SUPER_RR;
			local_control.poll = 1;
			local_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &local_control);

			chan->retry_count = 1;
			__set_monitor_timer(chan);
			chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
		/* Release frames acknowledged by the received reqseq */
		l2cap_process_reqseq(chan, control->reqseq);
		break;
	case L2CAP_EV_EXPLICIT_POLL:
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		__clear_ack_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RETRANS_TO:
		/* Retransmission timeout: poll the peer and await F-bit */
		l2cap_send_rr_or_rnr(chan, 1);
		chan->retry_count = 1;
		__set_monitor_timer(chan);
		chan->tx_state = L2CAP_TX_STATE_WAIT_F;
		break;
	case L2CAP_EV_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}
}
2728
Gustavo Padovand6603662012-05-21 13:58:22 -03002729static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2730 struct l2cap_ctrl *control,
2731 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002732{
Mat Martineau608bcc62012-05-17 20:53:32 -07002733 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2734 event);
2735
2736 switch (event) {
2737 case L2CAP_EV_DATA_REQUEST:
2738 if (chan->tx_send_head == NULL)
2739 chan->tx_send_head = skb_peek(skbs);
2740 /* Queue data, but don't send. */
2741 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2742 break;
2743 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2744 BT_DBG("Enter LOCAL_BUSY");
2745 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2746
2747 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2748 /* The SREJ_SENT state must be aborted if we are to
2749 * enter the LOCAL_BUSY state.
2750 */
2751 l2cap_abort_rx_srej_sent(chan);
2752 }
2753
2754 l2cap_send_ack(chan);
2755
2756 break;
2757 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2758 BT_DBG("Exit LOCAL_BUSY");
2759 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2760
2761 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2762 struct l2cap_ctrl local_control;
2763 memset(&local_control, 0, sizeof(local_control));
2764 local_control.sframe = 1;
2765 local_control.super = L2CAP_SUPER_RR;
2766 local_control.poll = 1;
2767 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002768 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002769
2770 chan->retry_count = 1;
2771 __set_monitor_timer(chan);
2772 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2773 }
2774 break;
2775 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2776 l2cap_process_reqseq(chan, control->reqseq);
2777
2778 /* Fall through */
2779
2780 case L2CAP_EV_RECV_FBIT:
2781 if (control && control->final) {
2782 __clear_monitor_timer(chan);
2783 if (chan->unacked_frames > 0)
2784 __set_retrans_timer(chan);
2785 chan->retry_count = 0;
2786 chan->tx_state = L2CAP_TX_STATE_XMIT;
2787 BT_DBG("recv fbit tx_state 0x2.2%x", chan->tx_state);
2788 }
2789 break;
2790 case L2CAP_EV_EXPLICIT_POLL:
2791 /* Ignore */
2792 break;
2793 case L2CAP_EV_MONITOR_TO:
2794 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2795 l2cap_send_rr_or_rnr(chan, 1);
2796 __set_monitor_timer(chan);
2797 chan->retry_count++;
2798 } else {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02002799 l2cap_send_disconn_req(chan, ECONNABORTED);
Mat Martineau608bcc62012-05-17 20:53:32 -07002800 }
2801 break;
2802 default:
2803 break;
2804 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002805}
2806
Gustavo Padovand6603662012-05-21 13:58:22 -03002807static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2808 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002809{
Mat Martineau608bcc62012-05-17 20:53:32 -07002810 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2811 chan, control, skbs, event, chan->tx_state);
2812
2813 switch (chan->tx_state) {
2814 case L2CAP_TX_STATE_XMIT:
Gustavo Padovand6603662012-05-21 13:58:22 -03002815 l2cap_tx_state_xmit(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002816 break;
2817 case L2CAP_TX_STATE_WAIT_F:
Gustavo Padovand6603662012-05-21 13:58:22 -03002818 l2cap_tx_state_wait_f(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002819 break;
2820 default:
2821 /* Ignore event */
2822 break;
2823 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002824}
2825
/* Feed the reqseq (and F-bit) of a received frame into the TX state
 * machine so acknowledged frames are released.
 */
static void l2cap_pass_to_tx(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
}
2832
/* Feed only the F-bit of a received frame into the TX state machine
 * (no cumulative acknowledgment processing).
 */
static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
				  struct l2cap_ctrl *control)
{
	BT_DBG("chan %p, control %p", chan, control);
	l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
}
2839
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct sk_buff *nskb;
	struct l2cap_chan *chan;

	BT_DBG("conn %p", conn);

	/* Walk the connection's channel list under chan_lock and hand a
	 * clone of the frame to every raw channel.
	 */
	mutex_lock(&conn->chan_lock);

	list_for_each_entry(chan, &conn->chan_l, list) {
		if (chan->chan_type != L2CAP_CHAN_RAW)
			continue;

		/* Don't send frame to the channel it came from */
		if (bt_cb(skb)->l2cap.chan == chan)
			continue;

		/* Clone failure just skips this channel (best-effort) */
		nskb = skb_clone(skb, GFP_KERNEL);
		if (!nskb)
			continue;
		/* recv takes ownership on success; free on rejection */
		if (chan->ops->recv(chan, nskb))
			kfree_skb(nskb);
	}

	mutex_unlock(&conn->chan_lock);
}
2867
2868/* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one L2CAP signalling command.
 *
 * The first skb holds the L2CAP header, the command header and as much
 * of the @dlen-byte payload as fits within the connection MTU; any
 * remaining payload is chained as header-less continuation fragments on
 * the skb's frag_list.
 *
 * Returns the skb, or NULL if allocation fails or the MTU cannot even
 * hold the two headers.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
				       u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
	       conn, code, ident, dlen);

	if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
		return NULL;

	/* Total wire length; the first skb carries at most one MTU of it */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_KERNEL);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling uses a different fixed CID on LE vs. BR/EDR links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Payload portion that still fits in the first skb */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_KERNEL);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain built so far, including fragments */
	kfree_skb(skb);
	return NULL;
}
2934
/* Decode the configuration option at *ptr and advance *ptr past it.
 *
 * *type and *olen are taken straight from the option header. 1-, 2-
 * and 4-byte option values are returned in *val as host-endian
 * integers; for any other length *val holds a pointer to the raw
 * option payload instead.
 *
 * Returns the total number of bytes consumed (header + payload).
 *
 * NOTE(review): opt->len originates from the peer and is not validated
 * here against the bytes actually remaining in the buffer — callers
 * bound the overall walk with their own length counter, but a
 * truncated final option could still be read past its end; confirm
 * against upstream hardening of this helper.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
				     unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Non-integer option: hand back a pointer to the payload */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
	return len;
}
2968
Linus Torvalds1da177e2005-04-16 15:20:36 -07002969static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2970{
2971 struct l2cap_conf_opt *opt = *ptr;
2972
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002973 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002974
2975 opt->type = type;
2976 opt->len = len;
2977
2978 switch (len) {
2979 case 1:
2980 *((u8 *) opt->val) = val;
2981 break;
2982
2983 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002984 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002985 break;
2986
2987 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002988 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002989 break;
2990
2991 default:
2992 memcpy(opt->val, (void *) val, len);
2993 break;
2994 }
2995
2996 *ptr += L2CAP_CONF_OPT_SIZE + len;
2997}
2998
/* Append an Extended Flow Specification option built from the channel's
 * local EFS parameters. Only ERTM and streaming modes carry an EFS;
 * any other mode adds nothing.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
{
	struct l2cap_conf_efs efs;

	switch (chan->mode) {
	case L2CAP_MODE_ERTM:
		efs.id = chan->local_id;
		efs.stype = chan->local_stype;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
		efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
		break;

	case L2CAP_MODE_STREAMING:
		/* Streaming uses fixed id/type and no latency/flush values */
		efs.id = 1;
		efs.stype = L2CAP_SERV_BESTEFFORT;
		efs.msdu = cpu_to_le16(chan->local_msdu);
		efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
		efs.acc_lat = 0;
		efs.flush_to = 0;
		break;

	default:
		return;
	}

	l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
			   (unsigned long) &efs);
}
3029
/* Delayed-work handler for the ERTM acknowledgement timer.
 *
 * If received I-frames are still unacknowledged (buffer_seq has moved
 * past last_acked_seq) send an RR/RNR supervisory frame to ack them.
 * Drops a channel reference on exit — presumably the one taken when
 * the timer was armed; verify at the scheduling site.
 */
static void l2cap_ack_timeout(struct work_struct *work)
{
	struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
					       ack_timer.work);
	u16 frames_to_ack;

	BT_DBG("chan %p", chan);

	l2cap_chan_lock(chan);

	frames_to_ack = __seq_offset(chan, chan->buffer_seq,
				     chan->last_acked_seq);

	if (frames_to_ack)
		l2cap_send_rr_or_rnr(chan, 0);

	l2cap_chan_unlock(chan);
	l2cap_chan_put(chan);
}
3049
/* Reset a channel's sequencing/reassembly/AMP-move state and, for ERTM
 * mode, set up its timers and sequence lists.
 *
 * The counter and SDU-reassembly resets apply to every mode; non-ERTM
 * channels return 0 right after them. For ERTM, the retransmission,
 * monitor and ack delayed works are initialized and the SREJ and
 * retransmission sequence lists are allocated.
 *
 * Returns 0 on success or a negative error from l2cap_seq_list_init()
 * (the first list is freed again if the second allocation fails).
 */
int l2cap_ertm_init(struct l2cap_chan *chan)
{
	int err;

	chan->next_tx_seq = 0;
	chan->expected_tx_seq = 0;
	chan->expected_ack_seq = 0;
	chan->unacked_frames = 0;
	chan->buffer_seq = 0;
	chan->frames_sent = 0;
	chan->last_acked_seq = 0;
	chan->sdu = NULL;
	chan->sdu_last_frag = NULL;
	chan->sdu_len = 0;

	skb_queue_head_init(&chan->tx_q);

	/* Start on the BR/EDR controller with no AMP move in progress */
	chan->local_amp_id = AMP_ID_BREDR;
	chan->move_id = AMP_ID_BREDR;
	chan->move_state = L2CAP_MOVE_STABLE;
	chan->move_role = L2CAP_MOVE_ROLE_NONE;

	if (chan->mode != L2CAP_MODE_ERTM)
		return 0;

	chan->rx_state = L2CAP_RX_STATE_RECV;
	chan->tx_state = L2CAP_TX_STATE_XMIT;

	INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
	INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
	INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);

	skb_queue_head_init(&chan->srej_q);

	err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
	if (err < 0)
		return err;

	err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
	if (err < 0)
		l2cap_seq_list_free(&chan->srej_list);

	return err;
}
3094
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003095static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3096{
3097 switch (mode) {
3098 case L2CAP_MODE_STREAMING:
3099 case L2CAP_MODE_ERTM:
3100 if (l2cap_mode_supported(mode, remote_feat_mask))
3101 return mode;
3102 /* fall through */
3103 default:
3104 return L2CAP_MODE_BASIC;
3105 }
3106}
3107
Marcel Holtmann848566b2013-10-01 22:59:22 -07003108static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003109{
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02003110 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3111 (conn->feat_mask & L2CAP_FEAT_EXT_WINDOW));
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003112}
3113
Marcel Holtmann848566b2013-10-01 22:59:22 -07003114static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003115{
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02003116 return ((conn->local_fixed_chan & L2CAP_FC_A2MP) &&
3117 (conn->feat_mask & L2CAP_FEAT_EXT_FLOW));
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003118}
3119
/* Fill in the RFC option's retransmission and monitor timeouts.
 *
 * On a high-speed (AMP) link the timeouts are derived from the AMP
 * controller's best-effort flush timeout; otherwise the spec default
 * constants are used.
 */
static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
				      struct l2cap_conf_rfc *rfc)
{
	if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
		u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;

		/* Class 1 devices must have ERTM timeouts
		 * exceeding the Link Supervision Timeout. The
		 * default Link Supervision Timeout for AMP
		 * controllers is 10 seconds.
		 *
		 * Class 1 devices use 0xffffffff for their
		 * best-effort flush timeout, so the clamping logic
		 * will result in a timeout that meets the above
		 * requirement. ERTM timeouts are 16-bit values, so
		 * the maximum timeout is 65.535 seconds.
		 */

		/* Convert timeout to milliseconds and round */
		ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);

		/* This is the recommended formula for class 2 devices
		 * that start ERTM timers when packets are sent to the
		 * controller.
		 */
		ertm_to = 3 * ertm_to + 500;

		/* Clamp to the 16-bit range carried in the RFC option */
		if (ertm_to > 0xffff)
			ertm_to = 0xffff;

		rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
		rfc->monitor_timeout = rfc->retrans_timeout;
	} else {
		rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	}
}
3157
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003158static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3159{
3160 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
Marcel Holtmann848566b2013-10-01 22:59:22 -07003161 __l2cap_ews_supported(chan->conn)) {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003162 /* use extended control field */
3163 set_bit(FLAG_EXT_CTRL, &chan->flags);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003164 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3165 } else {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003166 chan->tx_win = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003167 L2CAP_DEFAULT_TX_WINDOW);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003168 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3169 }
Mat Martineauc20f8e32012-07-10 05:47:07 -07003170 chan->ack_win = chan->tx_win;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003171}
3172
/* Build an outgoing Configuration Request for @chan into @data.
 *
 * On the very first request (no prior conf req/rsp exchanged) the
 * channel mode may first be downgraded via l2cap_select_mode() based on
 * the remote feature mask. The options then emitted depend on the
 * final mode: MTU (when non-default), RFC, and for ERTM/streaming
 * optionally EFS, EWS and FCS.
 *
 * Returns the total request length (header plus options) in bytes.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = chan->mode };
	void *ptr = req->data;
	u16 size;

	BT_DBG("chan %p", chan);

	/* Mode selection happens only on the first exchange */
	if (chan->num_conf_req || chan->num_conf_rsp)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
			break;

		if (__l2cap_efs_supported(chan->conn))
			set_bit(FLAG_EFS_ENABLE, &chan->flags);

		/* fall through */
	default:
		chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
		break;
	}

done:
	if (chan->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		if (disable_ertm)
			break;

		/* Explicit Basic-mode RFC is only sent when the peer could
		 * have expected an enhanced mode.
		 */
		if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
		    !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.max_transmit = chan->max_tx;

		__l2cap_set_ertm_timeouts(chan, &rfc);

		/* PDU size bounded by the MTU minus worst-case overhead */
		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_txwin_setup(chan);

		rfc.txwin_size = min_t(u16, chan->tx_win,
				       L2CAP_DEFAULT_TX_WINDOW);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
					   chan->tx_win);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;

	case L2CAP_MODE_STREAMING:
		l2cap_txwin_setup(chan);
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;

		size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
			     L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
			     L2CAP_FCS_SIZE);
		rfc.max_pdu_size = cpu_to_le16(size);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);

		if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
			l2cap_add_opt_efs(&ptr, chan);

		if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
			if (chan->fcs == L2CAP_FCS_NONE ||
			    test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
				chan->fcs = L2CAP_FCS_NONE;
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						   chan->fcs);
			}
		break;
	}

	req->dcid = cpu_to_le16(chan->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3293
/* Parse the peer's Configuration Request accumulated in chan->conf_req
 * and build our Configuration Response into @data.
 *
 * A first pass walks the options, recording MTU, flush timeout, RFC,
 * FCS, EFS and EWS values; unknown non-hint option types are collected
 * into the response and flagged L2CAP_CONF_UNKNOWN. The channel mode
 * is then reconciled with the requested RFC mode before the response
 * options and result code are emitted.
 *
 * Returns the response length in bytes, or -ECONNREFUSED when the
 * request cannot be accepted (irreconcilable mode, unsupported
 * EFS/EWS, retry limits exceeded).
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
{
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = chan->conf_req;
	int len = chan->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_efs efs;
	u8 remote_efs = 0;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;
	u16 size;

	BT_DBG("chan %p", chan);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			chan->flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is accepted but not acted upon here */
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
			break;

		case L2CAP_CONF_EFS:
			remote_efs = 1;
			if (olen == sizeof(efs))
				memcpy(&efs, (void *) val, olen);
			break;

		case L2CAP_CONF_EWS:
			/* Extended window requires local A2MP support */
			if (!(chan->conn->local_fixed_chan & L2CAP_FC_A2MP))
				return -ECONNREFUSED;

			set_bit(FLAG_EXT_CTRL, &chan->flags);
			set_bit(CONF_EWS_RECV, &chan->conf_state);
			chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
			chan->remote_tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown mandatory option types back */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Mode reconciliation happens only on the first exchange */
	if (chan->num_conf_rsp || chan->num_conf_req > 1)
		goto done;

	switch (chan->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
			chan->mode = l2cap_select_mode(rfc.mode,
						       chan->conn->feat_mask);
			break;
		}

		if (remote_efs) {
			if (__l2cap_efs_supported(chan->conn))
				set_bit(FLAG_EFS_ENABLE, &chan->flags);
			else
				return -ECONNREFUSED;
		}

		if (chan->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (chan->mode != rfc.mode) {
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = chan->mode;

		if (chan->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				   (unsigned long) &rfc);
	}

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU)
			result = L2CAP_CONF_UNACCEPT;
		else {
			chan->omtu = mtu;
			set_bit(CONF_MTU_DONE, &chan->conf_state);
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);

		if (remote_efs) {
			if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != L2CAP_SERV_NOTRAFIC &&
			    efs.stype != chan->local_stype) {

				result = L2CAP_CONF_UNACCEPT;

				if (chan->num_conf_req >= 1)
					return -ECONNREFUSED;

				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			} else {
				/* Send PENDING Conf Rsp */
				result = L2CAP_CONF_PENDING;
				set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
			}
		}

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			chan->fcs = L2CAP_FCS_NONE;
			set_bit(CONF_MODE_DONE, &chan->conf_state);
			break;

		case L2CAP_MODE_ERTM:
			if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
				chan->remote_tx_win = rfc.txwin_size;
			else
				rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;

			chan->remote_max_tx = rfc.max_transmit;

			/* Clamp remote PDU size to our MTU minus overhead */
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			__l2cap_set_ertm_timeouts(chan, &rfc);

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					   sizeof(rfc), (unsigned long) &rfc);

			if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
				chan->remote_id = efs.id;
				chan->remote_stype = efs.stype;
				chan->remote_msdu = le16_to_cpu(efs.msdu);
				chan->remote_flush_to =
					le32_to_cpu(efs.flush_to);
				chan->remote_acc_lat =
					le32_to_cpu(efs.acc_lat);
				chan->remote_sdu_itime =
					le32_to_cpu(efs.sdu_itime);
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
						   sizeof(efs),
						   (unsigned long) &efs);
			}
			break;

		case L2CAP_MODE_STREAMING:
			size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
				     chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
				     L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
			rfc.max_pdu_size = cpu_to_le16(size);
			chan->remote_mps = size;

			set_bit(CONF_MODE_DONE, &chan->conf_state);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					   (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = chan->mode;
		}

		if (result == L2CAP_CONF_SUCCESS)
			set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
	}
	rsp->scid = cpu_to_le16(chan->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0);

	return ptr - data;
}
3507
Gustavo Padovan2d792812012-10-06 10:07:01 +01003508static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3509 void *data, u16 *result)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003510{
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003511 struct l2cap_conf_req *req = data;
3512 void *ptr = req->data;
3513 int type, olen;
3514 unsigned long val;
Mat Martineau36e999a2011-12-08 17:23:21 -08003515 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003516 struct l2cap_conf_efs efs;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003517
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003518 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003519
3520 while (len >= L2CAP_CONF_OPT_SIZE) {
3521 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3522
3523 switch (type) {
3524 case L2CAP_CONF_MTU:
3525 if (val < L2CAP_DEFAULT_MIN_MTU) {
3526 *result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003527 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003528 } else
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003529 chan->imtu = val;
3530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003531 break;
3532
3533 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003534 chan->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003535 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003536 2, chan->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003537 break;
3538
3539 case L2CAP_CONF_RFC:
3540 if (olen == sizeof(rfc))
3541 memcpy(&rfc, (void *)val, olen);
3542
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003543 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003544 rfc.mode != chan->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003545 return -ECONNREFUSED;
3546
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003547 chan->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003548
3549 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003550 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003551 break;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003552
3553 case L2CAP_CONF_EWS:
Mat Martineauc20f8e32012-07-10 05:47:07 -07003554 chan->ack_win = min_t(u16, val, chan->ack_win);
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003555 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Mat Martineauc20f8e32012-07-10 05:47:07 -07003556 chan->tx_win);
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003557 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003558
3559 case L2CAP_CONF_EFS:
3560 if (olen == sizeof(efs))
3561 memcpy(&efs, (void *)val, olen);
3562
3563 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003564 efs.stype != L2CAP_SERV_NOTRAFIC &&
3565 efs.stype != chan->local_stype)
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003566 return -ECONNREFUSED;
3567
Gustavo Padovan2d792812012-10-06 10:07:01 +01003568 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3569 (unsigned long) &efs);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003570 break;
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003571
3572 case L2CAP_CONF_FCS:
3573 if (*result == L2CAP_CONF_PENDING)
3574 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003575 set_bit(CONF_RECV_NO_FCS,
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003576 &chan->conf_state);
3577 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003578 }
3579 }
3580
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003581 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003582 return -ECONNREFUSED;
3583
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003584 chan->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003585
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003586 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003587 switch (rfc.mode) {
3588 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003589 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3590 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3591 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003592 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3593 chan->ack_win = min_t(u16, chan->ack_win,
3594 rfc.txwin_size);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003595
3596 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3597 chan->local_msdu = le16_to_cpu(efs.msdu);
3598 chan->local_sdu_itime =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003599 le32_to_cpu(efs.sdu_itime);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003600 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3601 chan->local_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003602 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003603 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003604 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003605
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003606 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003607 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003608 }
3609 }
3610
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003611 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003612 req->flags = cpu_to_le16(0);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003613
3614 return ptr - data;
3615}
3616
Gustavo Padovan2d792812012-10-06 10:07:01 +01003617static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3618 u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003619{
3620 struct l2cap_conf_rsp *rsp = data;
3621 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003622
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003623 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003624
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003625 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003626 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003627 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003628
3629 return ptr - data;
3630}
3631
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003632void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3633{
3634 struct l2cap_le_conn_rsp rsp;
3635 struct l2cap_conn *conn = chan->conn;
3636
3637 BT_DBG("chan %p", chan);
3638
3639 rsp.dcid = cpu_to_le16(chan->scid);
3640 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02003641 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03003642 rsp.credits = cpu_to_le16(chan->rx_credits);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003643 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003644
3645 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3646 &rsp);
3647}
3648
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003649void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003650{
3651 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003652 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003653 u8 buf[128];
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003654 u8 rsp_code;
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003655
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003656 rsp.scid = cpu_to_le16(chan->dcid);
3657 rsp.dcid = cpu_to_le16(chan->scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003658 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3659 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003660
3661 if (chan->hs_hcon)
3662 rsp_code = L2CAP_CREATE_CHAN_RSP;
3663 else
3664 rsp_code = L2CAP_CONN_RSP;
3665
3666 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3667
3668 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003669
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003670 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003671 return;
3672
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003673 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003674 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003675 chan->num_conf_req++;
3676}
3677
/* Extract the RFC and extended-window-size options from a Configure
 * Response buffer (@rsp, @len bytes) and apply the negotiated
 * ERTM/streaming parameters (timeouts, MPS, ack window) to @chan.
 * Does nothing for channels that are in neither ERTM nor streaming
 * mode.  Options that are absent or mis-sized fall back to the sane
 * defaults set up below.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
{
	int type, olen;
	unsigned long val;
	/* Use sane default values in case a misbehaving remote device
	 * did not send an RFC or extended window size option.
	 */
	u16 txwin_ext = chan->ack_win;
	struct l2cap_conf_rfc rfc = {
		.mode = chan->mode,
		.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
		.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
		.max_pdu_size = cpu_to_le16(chan->imtu),
		.txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
	};

	BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);

	if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
		return;

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_RFC:
			/* Only accept a correctly sized RFC option;
			 * otherwise keep the defaults above.
			 */
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);
			break;
		case L2CAP_CONF_EWS:
			txwin_ext = val;
			break;
		}
	}

	switch (rfc.mode) {
	case L2CAP_MODE_ERTM:
		chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
		/* With extended control fields the window comes from the
		 * EWS option, otherwise from the RFC txwin_size field;
		 * never grow beyond our current ack window.
		 */
		if (test_bit(FLAG_EXT_CTRL, &chan->flags))
			chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
		else
			chan->ack_win = min_t(u16, chan->ack_win,
					      rfc.txwin_size);
		break;
	case L2CAP_MODE_STREAMING:
		chan->mps = le16_to_cpu(rfc.max_pdu_size);
	}
}
3728
Gustavo Padovan2d792812012-10-06 10:07:01 +01003729static inline int l2cap_command_rej(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003730 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3731 u8 *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003732{
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003733 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003734
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003735 if (cmd_len < sizeof(*rej))
3736 return -EPROTO;
3737
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003738 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003739 return 0;
3740
3741 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003742 cmd->ident == conn->info_ident) {
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02003743 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003744
3745 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003746 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003747
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003748 l2cap_conn_start(conn);
3749 }
3750
3751 return 0;
3752}
3753
/* Core handler for an incoming Connection Request (BR/EDR or AMP).
 * Looks up a listening channel on the requested PSM, performs security
 * checks, creates and registers the new channel, and sends the
 * response with @rsp_code (Connection Response or Create Channel
 * Response).  @amp_id selects the controller the channel is created
 * on (AMP_ID_BREDR for the BR/EDR controller).
 * Returns the new channel, or NULL if none was created.
 */
static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code, u8 amp_id)
{
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct l2cap_chan *chan = NULL, *pchan;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, ACL_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	/* Lock order: connection channel-list lock, then channel lock. */
	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
	    !hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid))
		goto response;

	chan = pchan->ops->new_connection(pchan);
	if (!chan)
		goto response;

	/* For certain devices (ex: HID mouse), support for authentication,
	 * pairing and bonding is optional. For such devices, inorder to avoid
	 * the ACL alive for too long after L2CAP disconnection, reset the ACL
	 * disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
	 */
	conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	/* The peer's source CID is our destination CID. */
	chan->psm  = psm;
	chan->dcid = scid;
	chan->local_amp_id = amp_id;

	__l2cap_chan_add(conn, chan);

	dcid = chan->scid;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_chan_check_security(chan, false)) {
			if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
				/* Acceptance deferred to user space. */
				l2cap_state_change(chan, BT_CONNECT2);
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				chan->ops->defer(chan);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up.
				 */
				if (amp_id == AMP_ID_BREDR) {
					l2cap_state_change(chan, BT_CONFIG);
					result = L2CAP_CR_SUCCESS;
				} else {
					l2cap_state_change(chan, BT_CONNECT2);
					result = L2CAP_CR_PEND;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Wait for authentication to complete first. */
			l2cap_state_change(chan, BT_CONNECT2);
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange still outstanding. */
		l2cap_state_change(chan, BT_CONNECT2);
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

response:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	/* Drop the reference taken by l2cap_global_chan_by_psm(). */
	l2cap_chan_put(pchan);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		/* Kick off the feature mask exchange that gates the
		 * final connection result.
		 */
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);

		l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
			       sizeof(info), &info);
	}

	if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
	    result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		set_bit(CONF_REQ_SENT, &chan->conf_state);
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, buf), buf);
		chan->num_conf_req++;
	}

	return chan;
}
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003888
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003889static int l2cap_connect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003890 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003891{
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303892 struct hci_dev *hdev = conn->hcon->hdev;
3893 struct hci_conn *hcon = conn->hcon;
3894
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003895 if (cmd_len < sizeof(struct l2cap_conn_req))
3896 return -EPROTO;
3897
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303898 hci_dev_lock(hdev);
Marcel Holtmannd7a5a112015-03-13 02:11:00 -07003899 if (hci_dev_test_flag(hdev, HCI_MGMT) &&
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303900 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
Alfonso Acosta48ec92f2014-10-07 08:44:10 +00003901 mgmt_device_connected(hdev, hcon, 0, NULL, 0);
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303902 hci_dev_unlock(hdev);
3903
Gustavo Padovan300229f2012-10-12 19:40:40 +08003904 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905 return 0;
3906}
3907
/* Handle a Connection Response or Create Channel Response.  Finds the
 * matching local channel (by our source CID if the peer echoed it,
 * otherwise by the pending command ident) and either moves it to the
 * configuration phase, marks it connect-pending, or tears it down on
 * any other result code.
 * Returns 0 on success, -EPROTO for a truncated PDU, or -EBADSLT if
 * no matching channel exists.
 */
static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				    u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct l2cap_chan *chan;
	u8 req[128];
	int err;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
	       dcid, scid, result, status);

	/* Hold the channel-list lock while resolving and updating the
	 * channel; individual channel lock is taken afterwards.
	 */
	mutex_lock(&conn->chan_lock);

	if (scid) {
		chan = __l2cap_get_chan_by_scid(conn, scid);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	} else {
		/* Pending responses carry no source CID; match on the
		 * ident of the request we sent.
		 */
		chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
		if (!chan) {
			err = -EBADSLT;
			goto unlock;
		}
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		l2cap_state_change(chan, BT_CONFIG);
		chan->ident = 0;
		chan->dcid = dcid;
		clear_bit(CONF_CONNECT_PEND, &chan->conf_state);

		/* Only the first success triggers a Configure Request. */
		if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
			break;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
			       l2cap_build_conf_req(chan, req), req);
		chan->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		set_bit(CONF_CONNECT_PEND, &chan->conf_state);
		break;

	default:
		/* Any other result code is a refusal. */
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
3980
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003981static inline void set_default_fcs(struct l2cap_chan *chan)
Mat Martineau8c462b62010-08-24 15:35:42 -07003982{
3983 /* FCS is enabled only in ERTM or streaming mode, if one or both
3984 * sides request it.
3985 */
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003986 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003987 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003988 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003989 chan->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07003990}
3991
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03003992static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3993 u8 ident, u16 flags)
3994{
3995 struct l2cap_conn *conn = chan->conn;
3996
3997 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3998 flags);
3999
4000 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
4001 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
4002
4003 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
4004 l2cap_build_conf_rsp(chan, data,
4005 L2CAP_CONF_SUCCESS, flags), data);
4006}
4007
Johan Hedberg662d6522013-10-16 11:20:47 +03004008static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
4009 u16 scid, u16 dcid)
4010{
4011 struct l2cap_cmd_rej_cid rej;
4012
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004013 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
Johan Hedberg662d6522013-10-16 11:20:47 +03004014 rej.scid = __cpu_to_le16(scid);
4015 rej.dcid = __cpu_to_le16(dcid);
4016
4017 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
4018}
4019
Gustavo Padovan2d792812012-10-06 10:07:01 +01004020static inline int l2cap_config_req(struct l2cap_conn *conn,
4021 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4022 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023{
4024 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4025 u16 dcid, flags;
4026 u8 rsp[64];
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004027 struct l2cap_chan *chan;
Mat Martineau3c588192012-04-11 10:48:42 -07004028 int len, err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004030 if (cmd_len < sizeof(*req))
4031 return -EPROTO;
4032
Linus Torvalds1da177e2005-04-16 15:20:36 -07004033 dcid = __le16_to_cpu(req->dcid);
4034 flags = __le16_to_cpu(req->flags);
4035
4036 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4037
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004038 chan = l2cap_get_chan_by_scid(conn, dcid);
Johan Hedberg662d6522013-10-16 11:20:47 +03004039 if (!chan) {
4040 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4041 return 0;
4042 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004043
David S. Miller033b1142011-07-21 13:38:42 -07004044 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
Johan Hedberg662d6522013-10-16 11:20:47 +03004045 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4046 chan->dcid);
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004047 goto unlock;
Gustavo F. Padovandf6bd742010-06-14 02:26:15 -03004048 }
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004049
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004050 /* Reject if config buffer is too small. */
Al Viro88219a02007-07-29 00:17:25 -07004051 len = cmd_len - sizeof(*req);
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004052 if (chan->conf_len + len > sizeof(chan->conf_req)) {
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004053 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004054 l2cap_build_conf_rsp(chan, rsp,
4055 L2CAP_CONF_REJECT, flags), rsp);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004056 goto unlock;
4057 }
4058
4059 /* Store config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004060 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4061 chan->conf_len += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004062
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004063 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004064 /* Incomplete config. Send empty response. */
4065 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004066 l2cap_build_conf_rsp(chan, rsp,
4067 L2CAP_CONF_SUCCESS, flags), rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004068 goto unlock;
4069 }
4070
4071 /* Complete config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004072 len = l2cap_parse_conf_req(chan, rsp);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004073 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004074 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004075 goto unlock;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004076 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004077
Mat Martineau1500109b2012-10-23 15:24:15 -07004078 chan->ident = cmd->ident;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004079 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004080 chan->num_conf_rsp++;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004081
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004082 /* Reset config buffer. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004083 chan->conf_len = 0;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004084
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004085 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
Marcel Holtmann876d9482007-10-20 13:35:42 +02004086 goto unlock;
4087
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004088 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004089 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004090
Mat Martineau105bdf92012-04-27 16:50:48 -07004091 if (chan->mode == L2CAP_MODE_ERTM ||
4092 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004093 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004094
Mat Martineau3c588192012-04-11 10:48:42 -07004095 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004096 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004097 else
4098 l2cap_chan_ready(chan);
4099
Marcel Holtmann876d9482007-10-20 13:35:42 +02004100 goto unlock;
4101 }
4102
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004103 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004104 u8 buf[64];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004105 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004106 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004107 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004108 }
4109
Stephen Hemminger49c922b2014-10-27 21:12:20 -07004110 /* Got Conf Rsp PENDING from remote side and assume we sent
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004111 Conf Rsp PENDING in the code above */
4112 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03004113 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004114
4115 /* check compatibility */
4116
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004117 /* Send rsp for BR/EDR channel */
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004118 if (!chan->hs_hcon)
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004119 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4120 else
4121 chan->ident = cmd->ident;
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004122 }
4123
Linus Torvalds1da177e2005-04-16 15:20:36 -07004124unlock:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004125 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004126 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004127}
4128
Gustavo Padovan2d792812012-10-06 10:07:01 +01004129static inline int l2cap_config_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004130 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4131 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004132{
4133 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4134 u16 scid, flags, result;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004135 struct l2cap_chan *chan;
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004136 int len = cmd_len - sizeof(*rsp);
Mat Martineau3c588192012-04-11 10:48:42 -07004137 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004138
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004139 if (cmd_len < sizeof(*rsp))
4140 return -EPROTO;
4141
Linus Torvalds1da177e2005-04-16 15:20:36 -07004142 scid = __le16_to_cpu(rsp->scid);
4143 flags = __le16_to_cpu(rsp->flags);
4144 result = __le16_to_cpu(rsp->result);
4145
Andrei Emeltchenko61386cb2012-03-12 12:13:07 +02004146 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4147 result, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004148
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004149 chan = l2cap_get_chan_by_scid(conn, scid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004150 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004151 return 0;
4152
4153 switch (result) {
4154 case L2CAP_CONF_SUCCESS:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004155 l2cap_conf_rfc_get(chan, rsp->data, len);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004156 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004157 break;
4158
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004159 case L2CAP_CONF_PENDING:
4160 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4161
4162 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4163 char buf[64];
4164
4165 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004166 buf, &result);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004167 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004168 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004169 goto done;
4170 }
4171
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004172 if (!chan->hs_hcon) {
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004173 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4174 0);
Andrei Emeltchenko5ce66b52012-10-31 15:46:30 +02004175 } else {
4176 if (l2cap_check_efs(chan)) {
4177 amp_create_logical_link(chan);
4178 chan->ident = cmd->ident;
4179 }
4180 }
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004181 }
4182 goto done;
4183
Linus Torvalds1da177e2005-04-16 15:20:36 -07004184 case L2CAP_CONF_UNACCEPT:
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004185 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004186 char req[64];
4187
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004188 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004189 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004190 goto done;
4191 }
4192
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004193 /* throw out any old stored conf requests */
4194 result = L2CAP_CONF_SUCCESS;
Gustavo F. Padovanb4450032011-04-12 18:15:09 -03004195 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004196 req, &result);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004197 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004198 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004199 goto done;
4200 }
4201
4202 l2cap_send_cmd(conn, l2cap_get_ident(conn),
Gustavo Padovan2d792812012-10-06 10:07:01 +01004203 L2CAP_CONF_REQ, len, req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004204 chan->num_conf_req++;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004205 if (result != L2CAP_CONF_SUCCESS)
4206 goto done;
4207 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208 }
4209
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004210 default:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004211 l2cap_chan_set_err(chan, ECONNRESET);
Andrei Emeltchenko2e0052e2012-02-21 12:54:58 +02004212
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08004213 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004214 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004215 goto done;
4216 }
4217
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004218 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004219 goto done;
4220
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004221 set_bit(CONF_INPUT_DONE, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004222
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004223 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004224 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004225
Mat Martineau105bdf92012-04-27 16:50:48 -07004226 if (chan->mode == L2CAP_MODE_ERTM ||
4227 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004228 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004229
Mat Martineau3c588192012-04-11 10:48:42 -07004230 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004231 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004232 else
4233 l2cap_chan_ready(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004234 }
4235
4236done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004237 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004238 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004239}
4240
/* Handle an incoming L2CAP Disconnect Request: acknowledge it and tear
 * down the matching local channel.
 *
 * Returns 0 (an unknown CID is answered with a Command Reject) or
 * -EPROTO for a malformed PDU.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	/* Fixed-size PDU: anything else is a protocol error */
	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	mutex_lock(&conn->chan_lock);

	/* The peer's DCID names our end of the channel, i.e. our SCID */
	chan = __l2cap_get_chan_by_scid(conn, dcid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
		return 0;
	}

	l2cap_chan_lock(chan);

	rsp.dcid = cpu_to_le16(chan->scid);
	rsp.scid = cpu_to_le16(chan->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	chan->ops->set_shutdown(chan);

	/* Hold a reference across l2cap_chan_del() so that ops->close()
	 * below still sees a valid channel after it has been unlinked.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, ECONNRESET);

	/* close() is called after dropping the channel lock */
	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4287
/* Handle an incoming L2CAP Disconnect Response: the peer has confirmed
 * our disconnect request, so finish tearing down the local channel.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
				       struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				       u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct l2cap_chan *chan;

	/* Fixed-size PDU: anything else is a protocol error */
	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	mutex_lock(&conn->chan_lock);

	/* Unknown SCID: silently ignore (responses are never rejected) */
	chan = __l2cap_get_chan_by_scid(conn, scid);
	if (!chan) {
		mutex_unlock(&conn->chan_lock);
		return 0;
	}

	l2cap_chan_lock(chan);

	/* Hold a reference across l2cap_chan_del() so that ops->close()
	 * below still sees a valid channel after it has been unlinked.
	 */
	l2cap_chan_hold(chan);
	l2cap_chan_del(chan, 0);

	/* close() is called after dropping the channel lock */
	l2cap_chan_unlock(chan);

	chan->ops->close(chan);
	l2cap_chan_put(chan);

	mutex_unlock(&conn->chan_lock);

	return 0;
}
4326
Gustavo Padovan2d792812012-10-06 10:07:01 +01004327static inline int l2cap_information_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004328 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4329 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004330{
4331 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004332 u16 type;
4333
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004334 if (cmd_len != sizeof(*req))
4335 return -EPROTO;
4336
Linus Torvalds1da177e2005-04-16 15:20:36 -07004337 type = __le16_to_cpu(req->type);
4338
4339 BT_DBG("type 0x%4.4x", type);
4340
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004341 if (type == L2CAP_IT_FEAT_MASK) {
4342 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004343 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004344 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004345 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4346 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004347 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004348 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Gustavo Padovan2d792812012-10-06 10:07:01 +01004349 | L2CAP_FEAT_FCS;
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02004350 if (conn->local_fixed_chan & L2CAP_FC_A2MP)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03004351 feat_mask |= L2CAP_FEAT_EXT_FLOW
Gustavo Padovan2d792812012-10-06 10:07:01 +01004352 | L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenkoa5fd6f32011-09-16 16:26:32 +03004353
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004354 put_unaligned_le32(feat_mask, rsp->data);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004355 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4356 buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004357 } else if (type == L2CAP_IT_FIXED_CHAN) {
4358 u8 buf[12];
4359 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Mat Martineau50a147c2011-11-02 16:18:34 -07004360
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004361 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4362 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Johan Hedberg0bd49fc2014-12-02 10:09:26 +02004363 rsp->data[0] = conn->local_fixed_chan;
4364 memset(rsp->data + 1, 0, 7);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004365 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4366 buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004367 } else {
4368 struct l2cap_info_rsp rsp;
4369 rsp.type = cpu_to_le16(type);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004370 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004371 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4372 &rsp);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004373 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374
4375 return 0;
4376}
4377
/* Handle an incoming L2CAP Information Response (feature mask or fixed
 * channel map) and resume connection setup once discovery completes.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
					u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
	    conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	cancel_delayed_work(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused: treat discovery as finished and carry on */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	switch (type) {
	case L2CAP_IT_FEAT_MASK:
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query them next */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
				       L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			/* Discovery finished */
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
		break;

	case L2CAP_IT_FIXED_CHAN:
		/* First octet is the peer's fixed-channel bitmap */
		conn->remote_fixed_chan = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
		break;
	}

	return 0;
}
4440
/* Handle an incoming L2CAP Create Channel Request (AMP).
 *
 * amp_id AMP_ID_BREDR means an ordinary BR/EDR connection; any other id
 * must name a present, powered-up AMP controller, otherwise the request
 * is answered with L2CAP_CR_BAD_AMP.
 */
static int l2cap_create_channel_req(struct l2cap_conn *conn,
				    struct l2cap_cmd_hdr *cmd,
				    u16 cmd_len, void *data)
{
	struct l2cap_create_chan_req *req = data;
	struct l2cap_create_chan_rsp rsp;
	struct l2cap_chan *chan;
	struct hci_dev *hdev;
	u16 psm, scid;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	/* Only valid when we advertise the A2MP fixed channel */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);

	/* For controller id 0 make BR/EDR connection */
	if (req->amp_id == AMP_ID_BREDR) {
		l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			      req->amp_id);
		return 0;
	}

	/* Validate AMP controller id */
	hdev = hci_dev_get(req->amp_id);
	if (!hdev)
		goto error;

	if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
		hci_dev_put(hdev);
		goto error;
	}

	chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
			     req->amp_id);
	if (chan) {
		struct amp_mgr *mgr = conn->hcon->amp_mgr;
		struct hci_conn *hs_hcon;

		/* The AMP link to the peer must already exist */
		hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
						  &conn->hcon->dst);
		if (!hs_hcon) {
			hci_dev_put(hdev);
			cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
					       chan->dcid);
			return 0;
		}

		BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);

		mgr->bredr_chan = chan;
		chan->hs_hcon = hs_hcon;
		/* Force L2CAP FCS off for the AMP-routed channel */
		chan->fcs = L2CAP_FCS_NONE;
		conn->mtu = hdev->block_mtu;
	}

	/* Drop the reference taken by hci_dev_get() above */
	hci_dev_put(hdev);

	return 0;

error:
	rsp.dcid = 0;
	rsp.scid = cpu_to_le16(scid);
	rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
	rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
		       sizeof(rsp), &rsp);

	return 0;
}
4517
Mat Martineau8eb200b2012-10-23 15:24:17 -07004518static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4519{
4520 struct l2cap_move_chan_req req;
4521 u8 ident;
4522
4523 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4524
4525 ident = l2cap_get_ident(chan->conn);
4526 chan->ident = ident;
4527
4528 req.icid = cpu_to_le16(chan->scid);
4529 req.dest_amp_id = dest_amp_id;
4530
4531 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4532 &req);
4533
4534 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4535}
4536
Mat Martineau1500109b2012-10-23 15:24:15 -07004537static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004538{
4539 struct l2cap_move_chan_rsp rsp;
4540
Mat Martineau1500109b2012-10-23 15:24:15 -07004541 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004542
Mat Martineau1500109b2012-10-23 15:24:15 -07004543 rsp.icid = cpu_to_le16(chan->dcid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004544 rsp.result = cpu_to_le16(result);
4545
Mat Martineau1500109b2012-10-23 15:24:15 -07004546 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4547 sizeof(rsp), &rsp);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004548}
4549
Mat Martineau5b155ef2012-10-23 15:24:14 -07004550static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004551{
4552 struct l2cap_move_chan_cfm cfm;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004553
Mat Martineau5b155ef2012-10-23 15:24:14 -07004554 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004555
Mat Martineau5b155ef2012-10-23 15:24:14 -07004556 chan->ident = l2cap_get_ident(chan->conn);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004557
Mat Martineau5b155ef2012-10-23 15:24:14 -07004558 cfm.icid = cpu_to_le16(chan->scid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004559 cfm.result = cpu_to_le16(result);
4560
Mat Martineau5b155ef2012-10-23 15:24:14 -07004561 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4562 sizeof(cfm), &cfm);
4563
4564 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4565}
4566
4567static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4568{
4569 struct l2cap_move_chan_cfm cfm;
4570
4571 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4572
4573 cfm.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004574 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
Mat Martineau5b155ef2012-10-23 15:24:14 -07004575
4576 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4577 sizeof(cfm), &cfm);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004578}
4579
4580static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004581 u16 icid)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004582{
4583 struct l2cap_move_chan_cfm_rsp rsp;
4584
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004585 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004586
4587 rsp.icid = cpu_to_le16(icid);
4588 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4589}
4590
Mat Martineau5f3847a2012-10-23 15:24:12 -07004591static void __release_logical_link(struct l2cap_chan *chan)
4592{
4593 chan->hs_hchan = NULL;
4594 chan->hs_hcon = NULL;
4595
4596 /* Placeholder - release the logical link */
4597}
4598
Mat Martineau1500109b2012-10-23 15:24:15 -07004599static void l2cap_logical_fail(struct l2cap_chan *chan)
4600{
4601 /* Logical link setup failed */
4602 if (chan->state != BT_CONNECTED) {
4603 /* Create channel failure, disconnect */
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004604 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineau1500109b2012-10-23 15:24:15 -07004605 return;
4606 }
4607
4608 switch (chan->move_role) {
4609 case L2CAP_MOVE_ROLE_RESPONDER:
4610 l2cap_move_done(chan);
4611 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4612 break;
4613 case L2CAP_MOVE_ROLE_INITIATOR:
4614 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4615 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4616 /* Remote has only sent pending or
4617 * success responses, clean up
4618 */
4619 l2cap_move_done(chan);
4620 }
4621
4622 /* Other amp move states imply that the move
4623 * has already aborted
4624 */
4625 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4626 break;
4627 }
4628}
4629
4630static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4631 struct hci_chan *hchan)
4632{
4633 struct l2cap_conf_rsp rsp;
Mat Martineau1500109b2012-10-23 15:24:15 -07004634
Andrei Emeltchenko336178a2012-10-31 15:46:27 +02004635 chan->hs_hchan = hchan;
Mat Martineau1500109b2012-10-23 15:24:15 -07004636 chan->hs_hcon->l2cap_data = chan->conn;
4637
Andrei Emeltchenko35ba9562012-10-25 15:20:43 +03004638 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
Mat Martineau1500109b2012-10-23 15:24:15 -07004639
4640 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Andrei Emeltchenkofe79c6f2012-10-25 15:20:42 +03004641 int err;
Mat Martineau1500109b2012-10-23 15:24:15 -07004642
4643 set_default_fcs(chan);
4644
4645 err = l2cap_ertm_init(chan);
4646 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004647 l2cap_send_disconn_req(chan, -err);
Mat Martineau1500109b2012-10-23 15:24:15 -07004648 else
4649 l2cap_chan_ready(chan);
4650 }
4651}
4652
/* Advance a channel move once the AMP logical link came up.
 *
 * Attaches the new link and steps chan->move_state; a state outside the
 * two "waiting for logical link" states means the move already aborted,
 * so the fresh link is released and the channel marked stable.
 */
static void l2cap_logical_finish_move(struct l2cap_chan *chan,
				      struct hci_chan *hchan)
{
	chan->hs_hcon = hchan->conn;
	chan->hs_hcon->l2cap_data = chan->conn;

	BT_DBG("move_state %d", chan->move_state);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent after a success
		 * response is received
		 */
		chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		break;
	case L2CAP_MOVE_WAIT_LOGICAL_CFM:
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
		}
		break;
	default:
		/* Move was not in expected state, free the channel */
		__release_logical_link(chan);

		chan->move_state = L2CAP_MOVE_STABLE;
	}
}
4686
4687/* Call with chan locked */
Andrei Emeltchenko27695fb2012-10-25 15:20:45 +03004688void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4689 u8 status)
Mat Martineau5b155ef2012-10-23 15:24:14 -07004690{
Mat Martineau1500109b2012-10-23 15:24:15 -07004691 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4692
4693 if (status) {
4694 l2cap_logical_fail(chan);
4695 __release_logical_link(chan);
4696 return;
4697 }
4698
4699 if (chan->state != BT_CONNECTED) {
4700 /* Ignore logical link if channel is on BR/EDR */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004701 if (chan->local_amp_id != AMP_ID_BREDR)
Mat Martineau1500109b2012-10-23 15:24:15 -07004702 l2cap_logical_finish_create(chan, hchan);
4703 } else {
4704 l2cap_logical_finish_move(chan, hchan);
4705 }
Mat Martineau5b155ef2012-10-23 15:24:14 -07004706}
4707
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004708void l2cap_move_start(struct l2cap_chan *chan)
4709{
4710 BT_DBG("chan %p", chan);
4711
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004712 if (chan->local_amp_id == AMP_ID_BREDR) {
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004713 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4714 return;
4715 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4716 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4717 /* Placeholder - start physical link setup */
4718 } else {
4719 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4720 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4721 chan->move_id = 0;
4722 l2cap_move_setup(chan);
4723 l2cap_send_move_chan_req(chan, 0);
4724 }
4725}
4726
/* Finish creating a channel over an AMP controller.
 *
 * For an outgoing channel (state BT_CONNECT) a successful physical link
 * leads to a Create Channel Request on the AMP; failure falls back to a
 * plain BR/EDR Connect Request.  For an incoming channel, the pending
 * Create Channel Response is sent, followed on success by the first
 * Configure Request.
 */
static void l2cap_do_create(struct l2cap_chan *chan, int result,
			    u8 local_amp_id, u8 remote_amp_id)
{
	BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
	       local_amp_id, remote_amp_id);

	/* FCS is forced off for AMP-created channels */
	chan->fcs = L2CAP_FCS_NONE;

	/* Outgoing channel on AMP */
	if (chan->state == BT_CONNECT) {
		if (result == L2CAP_CR_SUCCESS) {
			chan->local_amp_id = local_amp_id;
			l2cap_send_create_chan_req(chan, remote_amp_id);
		} else {
			/* Revert to BR/EDR connect */
			l2cap_send_conn_req(chan);
		}

		return;
	}

	/* Incoming channel on AMP */
	if (__l2cap_no_conn_pending(chan)) {
		struct l2cap_conn_rsp rsp;
		char buf[128];
		rsp.scid = cpu_to_le16(chan->dcid);
		rsp.dcid = cpu_to_le16(chan->scid);

		if (result == L2CAP_CR_SUCCESS) {
			/* Send successful response */
			rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		} else {
			/* Send negative response */
			rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		}

		l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
			       sizeof(rsp), &rsp);

		if (result == L2CAP_CR_SUCCESS) {
			/* Enter configuration and send our Configure Request */
			l2cap_state_change(chan, BT_CONFIG);
			set_bit(CONF_REQ_SENT, &chan->conf_state);
			l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
				       L2CAP_CONF_REQ,
				       l2cap_build_conf_req(chan, buf), buf);
			chan->num_conf_req++;
		}
	}
}
4778
4779static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4780 u8 remote_amp_id)
4781{
4782 l2cap_move_setup(chan);
4783 chan->move_id = local_amp_id;
4784 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4785
4786 l2cap_send_move_chan_req(chan, remote_amp_id);
4787}
4788
/* Answer an incoming channel move after physical link confirmation.
 *
 * The decision is based on the state of the logical link (hchan); the
 * @result parameter is currently unused in this body.  The lookup of
 * the hci_chan is still a placeholder, so hchan is always NULL here
 * and the move is refused with L2CAP_MR_NOT_ALLOWED until the AMP
 * support code fills that in.
 */
static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
{
	struct hci_chan *hchan = NULL;

	/* Placeholder - get hci_chan for logical link */

	if (hchan) {
		if (hchan->state == BT_CONNECTED) {
			/* Logical link is ready to go */
			chan->hs_hcon = hchan->conn;
			chan->hs_hcon->l2cap_data = chan->conn;
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);

			l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		} else {
			/* Wait for logical link to be ready */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		}
	} else {
		/* Logical link not available */
		l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
	}
}
4813
4814static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4815{
4816 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4817 u8 rsp_result;
4818 if (result == -EINVAL)
4819 rsp_result = L2CAP_MR_BAD_ID;
4820 else
4821 rsp_result = L2CAP_MR_NOT_ALLOWED;
4822
4823 l2cap_send_move_chan_rsp(chan, rsp_result);
4824 }
4825
4826 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4827 chan->move_state = L2CAP_MOVE_STABLE;
4828
4829 /* Restart data transmission */
4830 l2cap_ertm_send(chan);
4831}
4832
/* Invoke with locked chan
 *
 * Physical link confirmation for an AMP create or move.  Dispatches on
 * channel state: a channel that is not yet connected continues channel
 * creation; a connected channel continues (or cancels) a channel move
 * according to its move role.
 *
 * NOTE(review): the BT_DISCONN/BT_CLOSED path unlocks the channel
 * before returning while every other path leaves it locked - callers
 * must match this asymmetric locking; confirm at the call sites.
 */
void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
{
	u8 local_amp_id = chan->local_amp_id;
	u8 remote_amp_id = chan->remote_amp_id;

	BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
	       chan, result, local_amp_id, remote_amp_id);

	/* Channel is already on its way down - nothing left to do */
	if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
		l2cap_chan_unlock(chan);
		return;
	}

	if (chan->state != BT_CONNECTED) {
		l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
	} else if (result != L2CAP_MR_SUCCESS) {
		l2cap_do_move_cancel(chan, result);
	} else {
		switch (chan->move_role) {
		case L2CAP_MOVE_ROLE_INITIATOR:
			l2cap_do_move_initiate(chan, local_amp_id,
					       remote_amp_id);
			break;
		case L2CAP_MOVE_ROLE_RESPONDER:
			l2cap_do_move_respond(chan, result);
			break;
		default:
			l2cap_do_move_cancel(chan, result);
			break;
		}
	}
}
4866
/* Handle an incoming Move Channel Request.
 *
 * Validates that the addressed channel exists and is eligible to move
 * (dynamic CID, ERTM or streaming mode, policy permits AMP), that the
 * destination controller is a usable AMP device, and resolves move
 * collisions by bd_addr comparison.  Always answers with a Move
 * Channel Response.
 *
 * l2cap_get_chan_by_dcid() returns the channel locked; it is unlocked
 * after the response has been sent.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
					 struct l2cap_cmd_hdr *cmd,
					 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_req *req = data;
	struct l2cap_move_chan_rsp rsp;
	struct l2cap_chan *chan;
	u16 icid = 0;
	u16 result = L2CAP_MR_NOT_ALLOWED;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);

	/* Moves only make sense when A2MP is offered on this connection */
	if (!(conn->local_fixed_chan & L2CAP_FC_A2MP))
		return -EINVAL;

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Unknown icid - refuse without a channel to lock */
		rsp.icid = cpu_to_le16(icid);
		rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
			       sizeof(rsp), &rsp);
		return 0;
	}

	chan->ident = cmd->ident;

	if (chan->scid < L2CAP_CID_DYN_START ||
	    chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
	    (chan->mode != L2CAP_MODE_ERTM &&
	     chan->mode != L2CAP_MODE_STREAMING)) {
		result = L2CAP_MR_NOT_ALLOWED;
		goto send_move_response;
	}

	if (chan->local_amp_id == req->dest_amp_id) {
		result = L2CAP_MR_SAME_ID;
		goto send_move_response;
	}

	/* Destination must be a powered-up AMP controller */
	if (req->dest_amp_id != AMP_ID_BREDR) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || hdev->dev_type != HCI_AMP ||
		    !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MR_BAD_ID;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Detect a move collision. Only send a collision response
	 * if this side has "lost", otherwise proceed with the move.
	 * The winner has the larger bd_addr.
	 */
	if ((__chan_is_moving(chan) ||
	     chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
	    bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
		result = L2CAP_MR_COLLISION;
		goto send_move_response;
	}

	chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
	l2cap_move_setup(chan);
	chan->move_id = req->dest_amp_id;
	icid = chan->dcid;

	if (req->dest_amp_id == AMP_ID_BREDR) {
		/* Moving to BR/EDR */
		if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
			result = L2CAP_MR_PEND;
		} else {
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
			result = L2CAP_MR_SUCCESS;
		}
	} else {
		chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
		/* Placeholder - uncomment when amp functions are available */
		/*amp_accept_physical(chan, req->dest_amp_id);*/
		result = L2CAP_MR_PEND;
	}

send_move_response:
	l2cap_send_move_chan_rsp(chan, result);

	l2cap_chan_unlock(chan);

	return 0;
}
4964
/* Advance the move state machine after a successful or pending Move
 * Channel Response.
 *
 * If no channel matches @icid, a Move Channel Confirm is still sent
 * using the icid alone.  Otherwise the channel comes back locked from
 * l2cap_get_chan_by_scid() and is unlocked at the end.  A pending
 * result re-arms the channel timer with the extended (ERTX) timeout.
 */
static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
{
	struct l2cap_chan *chan;
	struct hci_chan *hchan = NULL;

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan) {
		l2cap_send_move_chan_cfm_icid(conn, icid);
		return;
	}

	__clear_chan_timer(chan);
	if (result == L2CAP_MR_PEND)
		__set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);

	switch (chan->move_state) {
	case L2CAP_MOVE_WAIT_LOGICAL_COMP:
		/* Move confirm will be sent when logical link
		 * is complete.
		 */
		chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		break;
	case L2CAP_MOVE_WAIT_RSP_SUCCESS:
		if (result == L2CAP_MR_PEND) {
			break;
		} else if (test_bit(CONN_LOCAL_BUSY,
				    &chan->conn_state)) {
			chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
		} else {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move
			 */
			chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		}
		break;
	case L2CAP_MOVE_WAIT_RSP:
		/* Moving to AMP */
		if (result == L2CAP_MR_SUCCESS) {
			/* Remote is ready, send confirm immediately
			 * after logical link is ready
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
		} else {
			/* Both logical link and move success
			 * are required to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
		}

		/* Placeholder - get hci_chan for logical link */
		if (!hchan) {
			/* Logical link not available */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
			break;
		}

		/* If the logical link is not yet connected, do not
		 * send confirmation.
		 */
		if (hchan->state != BT_CONNECTED)
			break;

		/* Logical link is already ready to go */

		chan->hs_hcon = hchan->conn;
		chan->hs_hcon->l2cap_data = chan->conn;

		if (result == L2CAP_MR_SUCCESS) {
			/* Can confirm now */
			l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
		} else {
			/* Now only need move success
			 * to confirm
			 */
			chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
		}

		l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
		break;
	default:
		/* Any other amp move state means the move failed. */
		chan->move_id = chan->local_amp_id;
		l2cap_move_done(chan);
		l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
	}

	l2cap_chan_unlock(chan);
}
5054
5055static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5056 u16 result)
5057{
5058 struct l2cap_chan *chan;
5059
5060 chan = l2cap_get_chan_by_ident(conn, ident);
5061 if (!chan) {
5062 /* Could not locate channel, icid is best guess */
5063 l2cap_send_move_chan_cfm_icid(conn, icid);
5064 return;
5065 }
5066
5067 __clear_chan_timer(chan);
5068
5069 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5070 if (result == L2CAP_MR_COLLISION) {
5071 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5072 } else {
5073 /* Cleanup - cancel move */
5074 chan->move_id = chan->local_amp_id;
5075 l2cap_move_done(chan);
5076 }
5077 }
5078
5079 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5080
5081 l2cap_chan_unlock(chan);
5082}
5083
5084static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5085 struct l2cap_cmd_hdr *cmd,
5086 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005087{
5088 struct l2cap_move_chan_rsp *rsp = data;
5089 u16 icid, result;
5090
5091 if (cmd_len != sizeof(*rsp))
5092 return -EPROTO;
5093
5094 icid = le16_to_cpu(rsp->icid);
5095 result = le16_to_cpu(rsp->result);
5096
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005097 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005098
Mat Martineau5b155ef2012-10-23 15:24:14 -07005099 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5100 l2cap_move_continue(conn, icid, result);
5101 else
5102 l2cap_move_fail(conn, cmd->ident, icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005103
5104 return 0;
5105}
5106
/* Handle an incoming Move Channel Confirm.
 *
 * A Confirm Response is always sent, even when the icid is unknown
 * (required by the spec, per the comment below).  When the channel is
 * waiting for this confirm, a confirmed result commits the move to the
 * new controller (releasing the logical link if we end up on BR/EDR);
 * otherwise the move is rolled back.  l2cap_get_chan_by_dcid() returns
 * the channel locked; it is unlocked before returning.
 */
static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd,
				      u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm *cfm = data;
	struct l2cap_chan *chan;
	u16 icid, result;

	if (cmd_len != sizeof(*cfm))
		return -EPROTO;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);

	chan = l2cap_get_chan_by_dcid(conn, icid);
	if (!chan) {
		/* Spec requires a response even if the icid was not found */
		l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
		return 0;
	}

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
		if (result == L2CAP_MC_CONFIRMED) {
			chan->local_amp_id = chan->move_id;
			if (chan->local_amp_id == AMP_ID_BREDR)
				__release_logical_link(chan);
		} else {
			chan->move_id = chan->local_amp_id;
		}

		l2cap_move_done(chan);
	}

	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	l2cap_chan_unlock(chan);

	return 0;
}
5148
/* Handle an incoming Move Channel Confirm Response - the final step of
 * a channel move initiated by this side.
 *
 * When the channel was waiting for this response, the move is
 * committed: local_amp_id takes the moved-to controller id and, when
 * that is BR/EDR and a logical link is still held, the link is
 * released.  l2cap_get_chan_by_scid() returns the channel locked; it
 * is unlocked before returning.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
						 struct l2cap_cmd_hdr *cmd,
						 u16 cmd_len, void *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp = data;
	struct l2cap_chan *chan;
	u16 icid;

	if (cmd_len != sizeof(*rsp))
		return -EPROTO;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid 0x%4.4x", icid);

	chan = l2cap_get_chan_by_scid(conn, icid);
	if (!chan)
		return 0;

	__clear_chan_timer(chan);

	if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
		chan->local_amp_id = chan->move_id;

		if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
			__release_logical_link(chan);

		l2cap_move_done(chan);
	}

	l2cap_chan_unlock(chan);

	return 0;
}
5183
/* Handle an LE Connection Parameter Update Request.
 *
 * Only valid when the local device is the connection master; slaves
 * answer with -EINVAL (triggering a command reject).  The requested
 * parameters are validated via hci_check_conn_params() and the result
 * (accepted/rejected) is sent back before any update is applied.  On
 * acceptance the controller is asked to update the connection and the
 * mgmt layer is notified so userspace can store the new parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					      struct l2cap_cmd_hdr *cmd,
					      u16 cmd_len, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier;
	int err;

	if (hcon->role != HCI_ROLE_MASTER)
		return -EINVAL;

	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
	       min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = hci_check_conn_params(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
		       sizeof(rsp), &rsp);

	if (!err) {
		u8 store_hint;

		store_hint = hci_le_conn_update(hcon, min, max, latency,
						to_multiplier);
		mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
				    store_hint, min, max, latency,
				    to_multiplier);

	}

	return 0;
}
5233
/* Handle an LE Credit Based Connection Response.
 *
 * The pending channel is looked up by the ident of our outstanding
 * request, under conn->chan_lock with the channel additionally locked
 * while its state is updated.  Success completes channel setup with
 * the peer's dcid, MTU, MPS and initial TX credits.  A security
 * failure (authentication/encryption) raises our security level and
 * retries via SMP, unless MITM protection is already in place; any
 * other result tears the channel down.
 */
static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
	struct hci_conn *hcon = conn->hcon;
	u16 dcid, mtu, mps, credits, result;
	struct l2cap_chan *chan;
	int err, sec_level;

	if (cmd_len < sizeof(*rsp))
		return -EPROTO;

	dcid = __le16_to_cpu(rsp->dcid);
	mtu = __le16_to_cpu(rsp->mtu);
	mps = __le16_to_cpu(rsp->mps);
	credits = __le16_to_cpu(rsp->credits);
	result = __le16_to_cpu(rsp->result);

	/* 23 is the minimum LE data size; smaller MTU/MPS are invalid */
	if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
		return -EPROTO;

	BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
	       dcid, mtu, mps, credits, result);

	mutex_lock(&conn->chan_lock);

	chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
	if (!chan) {
		err = -EBADSLT;
		goto unlock;
	}

	err = 0;

	l2cap_chan_lock(chan);

	switch (result) {
	case L2CAP_CR_SUCCESS:
		chan->ident = 0;
		chan->dcid = dcid;
		chan->omtu = mtu;
		chan->remote_mps = mps;
		chan->tx_credits = credits;
		l2cap_chan_ready(chan);
		break;

	case L2CAP_CR_AUTHENTICATION:
	case L2CAP_CR_ENCRYPTION:
		/* If we already have MITM protection we can't do
		 * anything.
		 */
		if (hcon->sec_level > BT_SECURITY_MEDIUM) {
			l2cap_chan_del(chan, ECONNREFUSED);
			break;
		}

		sec_level = hcon->sec_level + 1;
		if (chan->sec_level < sec_level)
			chan->sec_level = sec_level;

		/* We'll need to send a new Connect Request */
		clear_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags);

		smp_conn_security(hcon, chan->sec_level);
		break;

	default:
		l2cap_chan_del(chan, ECONNREFUSED);
		break;
	}

	l2cap_chan_unlock(chan);

unlock:
	mutex_unlock(&conn->chan_lock);

	return err;
}
5313
/* Dispatch a single BR/EDR signaling command to its handler.
 *
 * Only some handlers propagate their error code (requests whose
 * failure should produce a command reject); response handlers are
 * called without capturing a return value.  An unknown opcode yields
 * -EINVAL.
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
				      struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				      u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_REQ:
		err = l2cap_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_RSP:
	case L2CAP_CREATE_CHAN_RSP:
		l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_REQ:
		err = l2cap_config_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONF_RSP:
		l2cap_config_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_ECHO_REQ:
		/* Echo the received payload straight back */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
		break;

	case L2CAP_ECHO_RSP:
		break;

	case L2CAP_INFO_REQ:
		err = l2cap_information_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_INFO_RSP:
		l2cap_information_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CREATE_CHAN_REQ:
		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5393
/* Handle an incoming LE Credit Based Connection Request.
 *
 * Looks up a listening channel for the requested PSM, verifies the
 * link's security level, validates the peer's source CID (dynamic
 * range, not already in use), then creates and initializes the new
 * channel.  A response is always sent, except when the channel was
 * deferred to userspace (L2CAP_CR_PEND), in which case the response
 * is sent later when the connection is accepted or rejected.
 *
 * Locking: conn->chan_lock and the parent channel lock are held while
 * the new channel is created and added.
 */
static int l2cap_le_connect_req(struct l2cap_conn *conn,
				struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				u8 *data)
{
	struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
	struct l2cap_le_conn_rsp rsp;
	struct l2cap_chan *chan, *pchan;
	u16 dcid, scid, credits, mtu, mps;
	__le16 psm;
	u8 result;

	if (cmd_len != sizeof(*req))
		return -EPROTO;

	scid = __le16_to_cpu(req->scid);
	mtu = __le16_to_cpu(req->mtu);
	mps = __le16_to_cpu(req->mps);
	psm = req->psm;
	dcid = 0;
	credits = 0;

	/* 23 is the minimum LE data size; smaller MTU/MPS are invalid */
	if (mtu < 23 || mps < 23)
		return -EPROTO;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
	       scid, mtu, mps);

	/* Check if we have socket listening on psm */
	pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
					 &conn->hcon->dst, LE_LINK);
	if (!pchan) {
		result = L2CAP_CR_BAD_PSM;
		chan = NULL;
		goto response;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(pchan);

	if (!smp_sufficient_security(conn->hcon, pchan->sec_level,
				     SMP_ALLOW_STK)) {
		result = L2CAP_CR_AUTHENTICATION;
		chan = NULL;
		goto response_unlock;
	}

	/* Check for valid dynamic CID range */
	if (scid < L2CAP_CID_DYN_START || scid > L2CAP_CID_LE_DYN_END) {
		result = L2CAP_CR_INVALID_SCID;
		chan = NULL;
		goto response_unlock;
	}

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(conn, scid)) {
		result = L2CAP_CR_SCID_IN_USE;
		chan = NULL;
		goto response_unlock;
	}

	chan = pchan->ops->new_connection(pchan);
	if (!chan) {
		result = L2CAP_CR_NO_MEM;
		goto response_unlock;
	}

	l2cap_le_flowctl_init(chan);

	bacpy(&chan->src, &conn->hcon->src);
	bacpy(&chan->dst, &conn->hcon->dst);
	chan->src_type = bdaddr_src_type(conn->hcon);
	chan->dst_type = bdaddr_dst_type(conn->hcon);
	chan->psm = psm;
	chan->dcid = scid;
	chan->omtu = mtu;
	chan->remote_mps = mps;
	chan->tx_credits = __le16_to_cpu(req->credits);

	__l2cap_chan_add(conn, chan);
	dcid = chan->scid;
	credits = chan->rx_credits;

	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	chan->ident = cmd->ident;

	if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
		l2cap_state_change(chan, BT_CONNECT2);
		/* The following result value is actually not defined
		 * for LE CoC but we use it to let the function know
		 * that it should bail out after doing its cleanup
		 * instead of sending a response.
		 */
		result = L2CAP_CR_PEND;
		chan->ops->defer(chan);
	} else {
		l2cap_chan_ready(chan);
		result = L2CAP_CR_SUCCESS;
	}

response_unlock:
	l2cap_chan_unlock(pchan);
	mutex_unlock(&conn->chan_lock);
	l2cap_chan_put(pchan);

	if (result == L2CAP_CR_PEND)
		return 0;

response:
	if (chan) {
		rsp.mtu = cpu_to_le16(chan->imtu);
		rsp.mps = cpu_to_le16(chan->mps);
	} else {
		rsp.mtu = 0;
		rsp.mps = 0;
	}

	rsp.dcid = cpu_to_le16(dcid);
	rsp.credits = cpu_to_le16(credits);
	rsp.result = cpu_to_le16(result);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);

	return 0;
}
5519
/* Handle an LE Flow Control Credit packet.
 *
 * Adds the peer-granted credits to the channel's TX budget, flushes
 * any frames queued while we were out of credits, and resumes the
 * channel if credits remain.  A peer granting more credits than the
 * protocol maximum allows is a protocol violation: the channel is
 * disconnected, but 0 is returned so no command reject is generated.
 * l2cap_get_chan_by_dcid() returns the channel locked; both exit
 * paths unlock it.
 */
static inline int l2cap_le_credits(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	struct l2cap_le_credits *pkt;
	struct l2cap_chan *chan;
	u16 cid, credits, max_credits;

	if (cmd_len != sizeof(*pkt))
		return -EPROTO;

	pkt = (struct l2cap_le_credits *) data;
	cid = __le16_to_cpu(pkt->cid);
	credits = __le16_to_cpu(pkt->credits);

	BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);

	chan = l2cap_get_chan_by_dcid(conn, cid);
	if (!chan)
		return -EBADSLT;

	max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
	if (credits > max_credits) {
		BT_ERR("LE credits overflow");
		l2cap_send_disconn_req(chan, ECONNRESET);
		l2cap_chan_unlock(chan);

		/* Return 0 so that we don't trigger an unnecessary
		 * command reject packet.
		 */
		return 0;
	}

	chan->tx_credits += credits;

	/* Send queued frames while we still have credits for them */
	while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
		l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
		chan->tx_credits--;
	}

	if (chan->tx_credits)
		chan->ops->resume(chan);

	l2cap_chan_unlock(chan);

	return 0;
}
5567
Johan Hedberg71fb4192013-12-10 10:52:48 +02005568static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5569 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5570 u8 *data)
5571{
5572 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5573 struct l2cap_chan *chan;
5574
5575 if (cmd_len < sizeof(*rej))
5576 return -EPROTO;
5577
5578 mutex_lock(&conn->chan_lock);
5579
5580 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5581 if (!chan)
5582 goto done;
5583
5584 l2cap_chan_lock(chan);
5585 l2cap_chan_del(chan, ECONNREFUSED);
5586 l2cap_chan_unlock(chan);
5587
5588done:
5589 mutex_unlock(&conn->chan_lock);
5590 return 0;
5591}
5592
/* Dispatch a single LE signaling command to its handler.
 *
 * Request handlers propagate their error so the caller can generate a
 * command reject; response handlers are called without capturing a
 * return value.  An unknown opcode yields -EINVAL.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
				   struct l2cap_cmd_hdr *cmd, u16 cmd_len,
				   u8 *data)
{
	int err = 0;

	switch (cmd->code) {
	case L2CAP_COMMAND_REJ:
		l2cap_le_command_rej(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_REQ:
		err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_CONN_PARAM_UPDATE_RSP:
		break;

	case L2CAP_LE_CONN_RSP:
		l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CONN_REQ:
		err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_LE_CREDITS:
		err = l2cap_le_credits(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_REQ:
		err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
		break;

	case L2CAP_DISCONN_RSP:
		l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
		break;

	default:
		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
		err = -EINVAL;
		break;
	}

	return err;
}
5639
Johan Hedbergc5623552013-04-29 19:35:33 +03005640static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5641 struct sk_buff *skb)
5642{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005643 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005644 struct l2cap_cmd_hdr *cmd;
5645 u16 len;
Johan Hedbergc5623552013-04-29 19:35:33 +03005646 int err;
5647
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005648 if (hcon->type != LE_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005649 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005650
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005651 if (skb->len < L2CAP_CMD_HDR_SIZE)
5652 goto drop;
Johan Hedbergc5623552013-04-29 19:35:33 +03005653
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005654 cmd = (void *) skb->data;
5655 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
Johan Hedbergc5623552013-04-29 19:35:33 +03005656
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005657 len = le16_to_cpu(cmd->len);
Johan Hedbergc5623552013-04-29 19:35:33 +03005658
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005659 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
Johan Hedbergc5623552013-04-29 19:35:33 +03005660
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005661 if (len != skb->len || !cmd->ident) {
5662 BT_DBG("corrupted command");
5663 goto drop;
5664 }
Johan Hedbergc5623552013-04-29 19:35:33 +03005665
Johan Hedberg203e6392013-05-15 10:07:15 +03005666 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005667 if (err) {
5668 struct l2cap_cmd_rej_unk rej;
Johan Hedbergc5623552013-04-29 19:35:33 +03005669
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005670 BT_ERR("Wrong link type (%d)", err);
Johan Hedbergc5623552013-04-29 19:35:33 +03005671
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005672 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005673 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5674 sizeof(rej), &rej);
Johan Hedbergc5623552013-04-29 19:35:33 +03005675 }
5676
Marcel Holtmann3b166292013-10-02 08:28:21 -07005677drop:
Johan Hedbergc5623552013-04-29 19:35:33 +03005678 kfree_skb(skb);
5679}
5680
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005681static inline void l2cap_sig_channel(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005682 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005683{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005684 struct hci_conn *hcon = conn->hcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005685 u8 *data = skb->data;
5686 int len = skb->len;
5687 struct l2cap_cmd_hdr cmd;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005688 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005689
5690 l2cap_raw_recv(conn, skb);
5691
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005692 if (hcon->type != ACL_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005693 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005694
Linus Torvalds1da177e2005-04-16 15:20:36 -07005695 while (len >= L2CAP_CMD_HDR_SIZE) {
Al Viro88219a02007-07-29 00:17:25 -07005696 u16 cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005697 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5698 data += L2CAP_CMD_HDR_SIZE;
5699 len -= L2CAP_CMD_HDR_SIZE;
5700
Al Viro88219a02007-07-29 00:17:25 -07005701 cmd_len = le16_to_cpu(cmd.len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005702
Gustavo Padovan2d792812012-10-06 10:07:01 +01005703 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5704 cmd.ident);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005705
Al Viro88219a02007-07-29 00:17:25 -07005706 if (cmd_len > len || !cmd.ident) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005707 BT_DBG("corrupted command");
5708 break;
5709 }
5710
Johan Hedbergc5623552013-04-29 19:35:33 +03005711 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005712 if (err) {
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03005713 struct l2cap_cmd_rej_unk rej;
Gustavo F. Padovan2c6d1a22011-03-23 14:38:32 -03005714
5715 BT_ERR("Wrong link type (%d)", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005716
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005717 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Gustavo Padovan2d792812012-10-06 10:07:01 +01005718 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5719 sizeof(rej), &rej);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005720 }
5721
Al Viro88219a02007-07-29 00:17:25 -07005722 data += cmd_len;
5723 len -= cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005724 }
5725
Marcel Holtmann3b166292013-10-02 08:28:21 -07005726drop:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005727 kfree_skb(skb);
5728}
5729
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005730static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005731{
5732 u16 our_fcs, rcv_fcs;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03005733 int hdr_size;
5734
5735 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5736 hdr_size = L2CAP_EXT_HDR_SIZE;
5737 else
5738 hdr_size = L2CAP_ENH_HDR_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005739
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005740 if (chan->fcs == L2CAP_FCS_CRC16) {
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005741 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005742 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5743 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5744
5745 if (our_fcs != rcv_fcs)
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005746 return -EBADMSG;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005747 }
5748 return 0;
5749}
5750
/* Answer a poll from the remote with a frame carrying the F (final)
 * bit: an RNR while locally busy, otherwise a pending I-frame or an
 * RR.  Pending I-frames are flushed first so the F-bit can ride on
 * one of them when possible.
 */
static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
{
	struct l2cap_ctrl control;

	BT_DBG("chan %p", chan);

	/* Template S-frame: ack everything up to buffer_seq, F-bit set */
	memset(&control, 0, sizeof(control));
	control.sframe = 1;
	control.final = 1;
	control.reqseq = chan->buffer_seq;
	set_bit(CONN_SEND_FBIT, &chan->conn_state);

	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		control.super = L2CAP_SUPER_RNR;
		l2cap_send_sframe(chan, &control);
	}

	/* If the remote just left its busy state and frames are still
	 * unacknowledged, restart the retransmission timer.
	 */
	if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
	    chan->unacked_frames > 0)
		__set_retrans_timer(chan);

	/* Send pending iframes */
	l2cap_ertm_send(chan);

	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
	    test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SUPER_RR;
		l2cap_send_sframe(chan, &control);
	}
}
5784
Gustavo Padovan2d792812012-10-06 10:07:01 +01005785static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5786 struct sk_buff **last_frag)
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005787{
Mat Martineau84084a32011-07-22 14:54:00 -07005788 /* skb->len reflects data in skb as well as all fragments
5789 * skb->data_len reflects only data in fragments
5790 */
5791 if (!skb_has_frag_list(skb))
5792 skb_shinfo(skb)->frag_list = new_frag;
5793
5794 new_frag->next = NULL;
5795
5796 (*last_frag)->next = new_frag;
5797 *last_frag = new_frag;
5798
5799 skb->len += new_frag->len;
5800 skb->data_len += new_frag->len;
5801 skb->truesize += new_frag->truesize;
5802}
5803
/* Reassemble an SDU from I-frames according to the SAR (segmentation
 * and reassembly) bits in @control.
 *
 * On success @skb is consumed: either delivered via chan->ops->recv()
 * or linked into the partial SDU at chan->sdu.  On error both the
 * frame and any partially assembled SDU are freed and the reassembly
 * state is reset.
 *
 * Returns 0 on success or a negative error code (-EINVAL for a SAR
 * sequence violation, -EMSGSIZE when the announced SDU length exceeds
 * the channel MTU, or whatever recv() returned).
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
				struct l2cap_ctrl *control)
{
	int err = -EINVAL;

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A reassembly must not already be in progress */
		if (chan->sdu)
			break;

		err = chan->ops->recv(chan, skb);
		break;

	case L2CAP_SAR_START:
		if (chan->sdu)
			break;

		/* First fragment starts with the 2-byte total SDU length */
		chan->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		if (chan->sdu_len > chan->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be shorter than the whole SDU;
		 * otherwise fall through to the error path (err stays
		 * -EINVAL).
		 */
		if (skb->len >= chan->sdu_len)
			break;

		chan->sdu = skb;
		chan->sdu_last_frag = skb;

		/* skb is now owned by the partial SDU */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* A continuation must not complete or overflow the SDU */
		if (chan->sdu->len >= chan->sdu_len)
			break;

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!chan->sdu)
			break;

		append_skb_frag(chan->sdu, skb,
				&chan->sdu_last_frag);
		skb = NULL;

		/* The final fragment must complete the SDU exactly */
		if (chan->sdu->len != chan->sdu_len)
			break;

		err = chan->ops->recv(chan, chan->sdu);

		if (!err) {
			/* Reassembly complete */
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
		break;
	}

	if (err) {
		/* skb may already be NULL if ownership was transferred
		 * above; kfree_skb(NULL) is a no-op.
		 */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	return err;
}
5885
/* Re-segment queued outgoing data after an MTU change.  Not yet
 * implemented; report success so callers (see l2cap_finish_move())
 * can proceed.
 */
static int l2cap_resegment(struct l2cap_chan *chan)
{
	return 0;
}
5891
Mat Martineaue3281402011-07-07 09:39:02 -07005892void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
Gustavo F. Padovan712132eb2010-06-21 19:39:50 -03005893{
Mat Martineau61aa4f52012-05-17 20:53:40 -07005894 u8 event;
5895
5896 if (chan->mode != L2CAP_MODE_ERTM)
5897 return;
5898
5899 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03005900 l2cap_tx(chan, NULL, NULL, event);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005901}
5902
/* Drain the SREJ queue: deliver buffered I-frames to the reassembler
 * in sequence order until a gap (missing txseq) is found or the local
 * side becomes busy.  When the queue is fully drained the channel
 * returns to the RECV state and the remote is acked.
 *
 * Returns 0 or the first reassembly error encountered.
 */
static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
{
	int err = 0;
	/* Pass sequential frames to l2cap_reassemble_sdu()
	 * until a gap is encountered.
	 */

	BT_DBG("chan %p", chan);

	while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
		       chan->buffer_seq, skb_queue_len(&chan->srej_q));

		skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);

		/* Gap found: stop and keep waiting for the missing frame */
		if (!skb)
			break;

		skb_unlink(skb, &chan->srej_q);
		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
		err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->l2cap);
		if (err)
			break;
	}

	if (skb_queue_empty(&chan->srej_q)) {
		chan->rx_state = L2CAP_RX_STATE_RECV;
		l2cap_send_ack(chan);
	}

	return err;
}
5936
/* Handle a received SREJ (selective reject) S-frame: retransmit the
 * single I-frame identified by control->reqseq, enforcing the retry
 * limit and tracking F-bit/SREJ bookkeeping while waiting for the
 * peer's final response.
 */
static void l2cap_handle_srej(struct l2cap_chan *chan,
			      struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot reject a frame that was never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
		       control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx != 0 && bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	if (control->poll) {
		l2cap_pass_to_tx(chan, control);

		/* A poll requires a final response; retransmit the
		 * requested frame and remember the SREJ so the later
		 * F-bit frame is not retransmitted twice.
		 */
		set_bit(CONN_SEND_FBIT, &chan->conn_state);
		l2cap_retransmit(chan, control);
		l2cap_ertm_send(chan);

		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
			set_bit(CONN_SREJ_ACT, &chan->conn_state);
			chan->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_pass_to_tx_fbit(chan, control);

		if (control->final) {
			/* Skip the retransmit only if this F-bit SREJ
			 * matches the poll-time SREJ already acted on.
			 */
			if (chan->srej_save_reqseq != control->reqseq ||
			    !test_and_clear_bit(CONN_SREJ_ACT,
						&chan->conn_state))
				l2cap_retransmit(chan, control);
		} else {
			l2cap_retransmit(chan, control);
			if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
				set_bit(CONN_SREJ_ACT, &chan->conn_state);
				chan->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
5994
/* Handle a received REJ S-frame: the peer requests retransmission of
 * all unacked I-frames starting at control->reqseq.  Enforces the
 * retry limit and avoids duplicate retransmission when a REJ was
 * already acted upon (CONN_REJ_ACT).
 */
static void l2cap_handle_rej(struct l2cap_chan *chan,
			     struct l2cap_ctrl *control)
{
	struct sk_buff *skb;

	BT_DBG("chan %p, control %p", chan, control);

	/* The peer cannot reject a frame that was never sent */
	if (control->reqseq == chan->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);

	/* max_tx == 0 means unlimited retransmissions */
	if (chan->max_tx && skb &&
	    bt_cb(skb)->l2cap.retries >= chan->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
		l2cap_send_disconn_req(chan, ECONNRESET);
		return;
	}

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	l2cap_pass_to_tx(chan, control);

	if (control->final) {
		/* Only retransmit if this REJ was not already handled
		 * at poll time.
		 */
		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
			l2cap_retransmit_all(chan, control);
	} else {
		l2cap_retransmit_all(chan, control);
		l2cap_ertm_send(chan);
		if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
			set_bit(CONN_REJ_ACT, &chan->conn_state);
	}
}
6031
/* Classify the txseq of a received I-frame relative to the expected
 * sequence number, the last acked sequence number and the tx window.
 * The returned L2CAP_TXSEQ_* value drives the RX state machines
 * (expected / duplicate / unexpected / invalid, with SREJ-specific
 * variants while in the SREJ_SENT state).
 */
static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
{
	BT_DBG("chan %p, txseq %d", chan, txseq);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
	       chan->expected_tx_seq);

	if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - after SREJ");
				return L2CAP_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - in window after SREJ sent");
				return L2CAP_TXSEQ_INVALID;
			}
		}

		if (chan->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - not requested");
			return L2CAP_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (chan->expected_tx_seq == txseq) {
		if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
		    chan->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_TXSEQ_EXPECTED;
		}
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) <
	    __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_TXSEQ_DUPLICATE;
	}

	if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_TXSEQ_UNEXPECTED;
	}
}
6117
/* RX state machine handler for the RECV state (no SREJ outstanding).
 *
 * Processes received I-frames and RR/RNR/REJ/SREJ events.  @skb may
 * be NULL for S-frame events.  Ownership: the skb is freed here
 * unless it was queued or handed to the reassembler, which is tracked
 * via skb_in_use.  Returns 0 or a negative error from reassembly.
 */
static int l2cap_rx_state_recv(struct l2cap_chan *chan,
			       struct l2cap_ctrl *control,
			       struct sk_buff *skb, u8 event)
{
	int err = 0;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, control->txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			l2cap_pass_to_tx(chan, control);

			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding expected seq %d",
				       control->txseq);
				break;
			}

			chan->expected_tx_seq = __next_seq(chan,
							   control->txseq);

			chan->buffer_seq = chan->expected_tx_seq;
			/* Reassembly takes ownership of the skb */
			skb_in_use = true;

			err = l2cap_reassemble_sdu(chan, skb, control);
			if (err)
				break;

			if (control->final) {
				if (!test_and_clear_bit(CONN_REJ_ACT,
							&chan->conn_state)) {
					control->final = 0;
					l2cap_retransmit_all(chan, control);
					l2cap_ertm_send(chan);
				}
			}

			if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
				l2cap_send_ack(chan);
			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			l2cap_pass_to_tx(chan, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
				BT_DBG("Busy, discarding unexpected seq %d",
				       control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
			l2cap_seq_list_clear(&chan->srej_list);
			l2cap_send_srej(chan, control->txseq);

			chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			/* Skip retransmission while a channel move is in
			 * progress or when the REJ was already handled.
			 */
			if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
			    !__chan_is_moving(chan)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			l2cap_send_i_or_rr_or_rnr(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_ertm_send(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control && control->poll) {
			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_rr_or_rnr(chan, 0);
		}
		__clear_retrans_timer(chan);
		l2cap_seq_list_clear(&chan->retrans_list);
		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6251
/* RX state machine handler for the SREJ_SENT state (one or more SREJs
 * outstanding).  Incoming I-frames are buffered in srej_q until the
 * missing frames arrive; l2cap_rx_queued_iframes() then delivers them
 * in order and returns the channel to the RECV state.
 *
 * Ownership: @skb is freed here unless it was queued (skb_in_use).
 * Returns 0 or a negative error from reassembly.
 */
static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
				    struct l2cap_ctrl *control,
				    struct sk_buff *skb, u8 event)
{
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = false;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	switch (event) {
	case L2CAP_EV_RECV_IFRAME:
		switch (l2cap_classify_txseq(chan, txseq)) {
		case L2CAP_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			chan->expected_tx_seq = __next_seq(chan, txseq);
			break;
		case L2CAP_TXSEQ_EXPECTED_SREJ:
			/* The frame at the head of the SREJ list arrived;
			 * queue it and try to deliver everything in order.
			 */
			l2cap_seq_list_pop(&chan->srej_list);

			l2cap_pass_to_tx(chan, control);
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			err = l2cap_rx_queued_iframes(chan);
			if (err)
				break;

			break;
		case L2CAP_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(&chan->srej_q, skb);
			skb_in_use = true;
			BT_DBG("Queued %p (queue len %d)", skb,
			       skb_queue_len(&chan->srej_q));

			l2cap_pass_to_tx(chan, control);
			l2cap_send_srej_list(chan, control->txseq);
			break;
		case L2CAP_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_pass_to_tx(chan, control);
			break;
		case L2CAP_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(chan, ECONNRESET);
			break;
		}
		break;
	case L2CAP_EV_RECV_RR:
		l2cap_pass_to_tx(chan, control);
		if (control->final) {
			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

			if (!test_and_clear_bit(CONN_REJ_ACT,
						&chan->conn_state)) {
				control->final = 0;
				l2cap_retransmit_all(chan, control);
			}

			l2cap_ertm_send(chan);
		} else if (control->poll) {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames) {
				__set_retrans_timer(chan);
			}

			set_bit(CONN_SEND_FBIT, &chan->conn_state);
			l2cap_send_srej_tail(chan);
		} else {
			if (test_and_clear_bit(CONN_REMOTE_BUSY,
					       &chan->conn_state) &&
			    chan->unacked_frames)
				__set_retrans_timer(chan);

			l2cap_send_ack(chan);
		}
		break;
	case L2CAP_EV_RECV_RNR:
		set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
		l2cap_pass_to_tx(chan, control);
		if (control->poll) {
			l2cap_send_srej_tail(chan);
		} else {
			struct l2cap_ctrl rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.sframe = 1;
			rr_control.super = L2CAP_SUPER_RR;
			rr_control.reqseq = chan->buffer_seq;
			l2cap_send_sframe(chan, &rr_control);
		}

		break;
	case L2CAP_EV_RECV_REJ:
		l2cap_handle_rej(chan, control);
		break;
	case L2CAP_EV_RECV_SREJ:
		l2cap_handle_srej(chan, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6394
Mat Martineau32b32732012-10-23 15:24:11 -07006395static int l2cap_finish_move(struct l2cap_chan *chan)
6396{
6397 BT_DBG("chan %p", chan);
6398
6399 chan->rx_state = L2CAP_RX_STATE_RECV;
6400
6401 if (chan->hs_hcon)
6402 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6403 else
6404 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6405
6406 return l2cap_resegment(chan);
6407}
6408
/* RX state machine handler for the WAIT_P state: only an S-frame with
 * the P (poll) bit set is accepted.  On receipt, the transmit queue
 * is rewound to the peer's reqseq, the move is finished, and the poll
 * is answered with an F-bit frame before re-processing the event in
 * the RECV state.
 */
static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
	       event);

	/* Anything other than a poll is a protocol error here */
	if (!control->poll)
		return -EPROTO;

	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	err = l2cap_finish_move(chan);
	if (err)
		return err;

	set_bit(CONN_SEND_FBIT, &chan->conn_state);
	l2cap_send_i_or_rr_or_rnr(chan);

	/* An I-frame cannot carry the P bit; reject it */
	if (event == L2CAP_EV_RECV_IFRAME)
		return -EPROTO;

	return l2cap_rx_state_recv(chan, control, NULL, event);
}
6446
/* RX state machine handler for the WAIT_F state: only a frame with
 * the F (final) bit set is accepted.  On receipt, the transmit queue
 * is rewound to the peer's reqseq, the channel MTU is updated for the
 * link now in use, and the frame is re-processed in the RECV state.
 */
static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
				 struct l2cap_ctrl *control,
				 struct sk_buff *skb, u8 event)
{
	int err;

	/* Anything other than a final response is a protocol error here */
	if (!control->final)
		return -EPROTO;

	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);

	chan->rx_state = L2CAP_RX_STATE_RECV;
	l2cap_process_reqseq(chan, control->reqseq);

	if (!skb_queue_empty(&chan->tx_q))
		chan->tx_send_head = skb_peek(&chan->tx_q);
	else
		chan->tx_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	chan->next_tx_seq = control->reqseq;
	chan->unacked_frames = 0;

	/* Adopt the MTU of the link the channel now runs over */
	if (chan->hs_hcon)
		chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
	else
		chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;

	err = l2cap_resegment(chan);

	if (!err)
		err = l2cap_rx_state_recv(chan, control, skb, event);

	return err;
}
6484
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006485static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6486{
6487 /* Make sure reqseq is for a packet that has been sent but not acked */
6488 u16 unacked;
6489
6490 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6491 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6492}
6493
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006494static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6495 struct sk_buff *skb, u8 event)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006496{
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006497 int err = 0;
6498
6499 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6500 control, skb, event, chan->rx_state);
6501
6502 if (__valid_reqseq(chan, control->reqseq)) {
6503 switch (chan->rx_state) {
6504 case L2CAP_RX_STATE_RECV:
6505 err = l2cap_rx_state_recv(chan, control, skb, event);
6506 break;
6507 case L2CAP_RX_STATE_SREJ_SENT:
6508 err = l2cap_rx_state_srej_sent(chan, control, skb,
6509 event);
6510 break;
Mat Martineau32b32732012-10-23 15:24:11 -07006511 case L2CAP_RX_STATE_WAIT_P:
6512 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6513 break;
6514 case L2CAP_RX_STATE_WAIT_F:
6515 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6516 break;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006517 default:
6518 /* shut it down */
6519 break;
6520 }
6521 } else {
6522 BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
6523 control->reqseq, chan->next_tx_seq,
6524 chan->expected_ack_seq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006525 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006526 }
6527
6528 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006529}
6530
/* Receive path for streaming mode: in-sequence frames are reassembled,
 * anything else (a gap caused by loss) discards the partially built SDU
 * and the frame itself — streaming mode never retransmits.
 *
 * Takes ownership of skb. Always returns 0 (err kept for symmetry with
 * the ERTM receive path).
 */
static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
			   struct sk_buff *skb)
{
	int err = 0;

	BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
	       chan->rx_state);

	if (l2cap_classify_txseq(chan, control->txseq) ==
	    L2CAP_TXSEQ_EXPECTED) {
		/* Let the TX side process any piggy-backed ack info */
		l2cap_pass_to_tx(chan, control);

		BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
		       __next_seq(chan, chan->buffer_seq));

		chan->buffer_seq = __next_seq(chan, chan->buffer_seq);

		/* Consumes skb (queued into the SDU or freed on error) */
		l2cap_reassemble_sdu(chan, skb, control);
	} else {
		/* Out-of-sequence frame: drop any partial SDU in progress */
		if (chan->sdu) {
			kfree_skb(chan->sdu);
			chan->sdu = NULL;
		}
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize expectations on the received sequence number */
	chan->last_acked_seq = control->txseq;
	chan->expected_tx_seq = __next_seq(chan, control->txseq);

	return err;
}
6568
/* Entry point for data frames on ERTM/streaming channels: validates the
 * FCS, length and control-field bits, then routes I-frames into the RX
 * state machine (or the streaming receiver) and S-frames into the state
 * machine via the matching event.
 *
 * Takes ownership of skb. Always returns 0; protocol errors trigger a
 * disconnect request instead of an error return.
 */
static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	struct l2cap_ctrl *control = &bt_cb(skb)->l2cap;
	u16 len;
	u8 event;

	__unpack_control(chan, skb);

	len = skb->len;

	/*
	 * We can just drop the corrupted I-frame here.
	 * Receiver will miss it and start proper recovery
	 * procedures and ask for retransmission.
	 */
	if (l2cap_check_fcs(chan, skb))
		goto drop;

	/* Payload length excludes the SDU length field of a START frame
	 * and the trailing FCS, when present.
	 */
	if (!control->sframe && control->sar == L2CAP_SAR_START)
		len -= L2CAP_SDULEN_SIZE;

	if (chan->fcs == L2CAP_FCS_CRC16)
		len -= L2CAP_FCS_SIZE;

	/* A PDU larger than the negotiated MPS is a protocol violation */
	if (len > chan->mps) {
		l2cap_send_disconn_req(chan, ECONNRESET);
		goto drop;
	}

	if (!control->sframe) {
		int err;

		BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
		       control->sar, control->reqseq, control->final,
		       control->txseq);

		/* Validate F-bit - F=0 always valid, F=1 only
		 * valid in TX WAIT_F
		 */
		if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
			goto drop;

		if (chan->mode != L2CAP_MODE_STREAMING) {
			event = L2CAP_EV_RECV_IFRAME;
			err = l2cap_rx(chan, control, skb, event);
		} else {
			err = l2cap_stream_rx(chan, control, skb);
		}

		if (err)
			l2cap_send_disconn_req(chan, ECONNRESET);
	} else {
		/* Map the S-frame function field to a state-machine event */
		const u8 rx_func_to_event[4] = {
			L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
			L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
		};

		/* Only I-frames are expected in streaming mode */
		if (chan->mode == L2CAP_MODE_STREAMING)
			goto drop;

		BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
		       control->reqseq, control->final, control->poll,
		       control->super);

		/* An S-frame carries no payload */
		if (len != 0) {
			BT_ERR("Trailing bytes: %d in sframe", len);
			l2cap_send_disconn_req(chan, ECONNRESET);
			goto drop;
		}

		/* Validate F and P bits */
		if (control->final && (control->poll ||
				       chan->tx_state != L2CAP_TX_STATE_WAIT_F))
			goto drop;

		event = rx_func_to_event[control->super];
		if (l2cap_rx(chan, control, skb, event))
			l2cap_send_disconn_req(chan, ECONNRESET);
	}

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
6656
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006657static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6658{
6659 struct l2cap_conn *conn = chan->conn;
6660 struct l2cap_le_credits pkt;
6661 u16 return_credits;
6662
6663 /* We return more credits to the sender only after the amount of
6664 * credits falls below half of the initial amount.
6665 */
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006666 if (chan->rx_credits >= (le_max_credits + 1) / 2)
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006667 return;
6668
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006669 return_credits = le_max_credits - chan->rx_credits;
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006670
6671 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6672
6673 chan->rx_credits += return_credits;
6674
6675 pkt.cid = cpu_to_le16(chan->scid);
6676 pkt.credits = cpu_to_le16(return_credits);
6677
6678 chan->ident = l2cap_get_ident(conn);
6679
6680 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6681}
6682
/* Receive one LE flow-control PDU: account a credit, return credits to
 * the sender when running low, and reassemble PDUs into SDUs (the first
 * PDU of an SDU starts with a 16-bit SDU length).
 *
 * skb ownership: consumed on all paths except the -ENOBUFS returns,
 * where the caller frees it. Reassembly/recv errors are handled
 * internally and reported as 0 to avoid a caller double-free.
 */
static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
{
	int err;

	/* A PDU arriving with no credits outstanding is a protocol error */
	if (!chan->rx_credits) {
		BT_ERR("No credits to receive LE L2CAP data");
		l2cap_send_disconn_req(chan, ECONNRESET);
		return -ENOBUFS;
	}

	if (chan->imtu < skb->len) {
		BT_ERR("Too big LE L2CAP PDU");
		return -ENOBUFS;
	}

	chan->rx_credits--;
	BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);

	/* Replenish the sender's credits if we are below the threshold */
	l2cap_chan_le_send_credits(chan);

	err = 0;

	if (!chan->sdu) {
		/* First PDU of an SDU: starts with the total SDU length */
		u16 sdu_len;

		sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, L2CAP_SDULEN_SIZE);

		BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
		       sdu_len, skb->len, chan->imtu);

		if (sdu_len > chan->imtu) {
			BT_ERR("Too big LE L2CAP SDU length received");
			err = -EMSGSIZE;
			goto failed;
		}

		if (skb->len > sdu_len) {
			BT_ERR("Too much LE L2CAP data received");
			err = -EINVAL;
			goto failed;
		}

		/* Complete single-PDU SDU: deliver it straight away */
		if (skb->len == sdu_len)
			return chan->ops->recv(chan, skb);

		/* Partial SDU: keep the skb as the reassembly head */
		chan->sdu = skb;
		chan->sdu_len = sdu_len;
		chan->sdu_last_frag = skb;

		return 0;
	}

	BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
	       chan->sdu->len, skb->len, chan->sdu_len);

	if (chan->sdu->len + skb->len > chan->sdu_len) {
		BT_ERR("Too much LE L2CAP data received");
		err = -EINVAL;
		goto failed;
	}

	/* skb is absorbed into the SDU under reassembly */
	append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
	skb = NULL;

	if (chan->sdu->len == chan->sdu_len) {
		/* SDU complete: hand it up; on success recv took ownership */
		err = chan->ops->recv(chan, chan->sdu);
		if (!err) {
			chan->sdu = NULL;
			chan->sdu_last_frag = NULL;
			chan->sdu_len = 0;
		}
	}

failed:
	if (err) {
		/* kfree_skb(NULL) is a no-op, so both frees are safe here */
		kfree_skb(skb);
		kfree_skb(chan->sdu);
		chan->sdu = NULL;
		chan->sdu_last_frag = NULL;
		chan->sdu_len = 0;
	}

	/* We can't return an error here since we took care of the skb
	 * freeing internally. An error return would cause the caller to
	 * do a double-free of the skb.
	 */
	return 0;
}
6772
/* Deliver an incoming frame to the channel identified by cid, routing
 * by the channel's operating mode. Unknown CIDs are dropped, except
 * the A2MP CID which may create its channel on demand.
 *
 * Takes ownership of skb. l2cap_get_chan_by_scid() returns the channel
 * locked; it is unlocked on exit via the done label.
 */
static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
			       struct sk_buff *skb)
{
	struct l2cap_chan *chan;

	chan = l2cap_get_chan_by_scid(conn, cid);
	if (!chan) {
		if (cid == L2CAP_CID_A2MP) {
			/* A2MP channels are created lazily on first data */
			chan = a2mp_channel_create(conn, skb);
			if (!chan) {
				kfree_skb(skb);
				return;
			}

			/* Lock manually to match l2cap_get_chan_by_scid() */
			l2cap_chan_lock(chan);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			/* Drop packet and return */
			kfree_skb(skb);
			return;
		}
	}

	BT_DBG("chan %p, len %d", chan, skb->len);

	/* If we receive data on a fixed channel before the info req/rsp
	 * procedure is done simply assume that the channel is supported
	 * and mark it as ready.
	 */
	if (chan->chan_type == L2CAP_CHAN_FIXED)
		l2cap_chan_ready(chan);

	if (chan->state != BT_CONNECTED)
		goto drop;

	switch (chan->mode) {
	case L2CAP_MODE_LE_FLOWCTL:
		/* On error the skb is still ours and must be freed */
		if (l2cap_le_data_rcv(chan, skb) < 0)
			goto drop;

		goto done;

	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (chan->imtu < skb->len) {
			BT_ERR("Dropping L2CAP data: receive buffer overflow");
			goto drop;
		}

		/* recv() returning 0 means it consumed the skb */
		if (!chan->ops->recv(chan, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Always consumes the skb */
		l2cap_data_rcv(chan, skb);
		goto done;

	default:
		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	l2cap_chan_unlock(chan);
}
6846
/* Deliver a connectionless (CID 0x0002) frame to a channel listening on
 * the given PSM. Only valid on BR/EDR (ACL) links.
 *
 * Takes ownership of skb. l2cap_global_chan_by_psm() returns a held
 * channel reference, released on every exit path below.
 */
static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
				  struct sk_buff *skb)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_chan *chan;

	if (hcon->type != ACL_LINK)
		goto free_skb;

	chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
					ACL_LINK);
	if (!chan)
		goto free_skb;

	BT_DBG("chan %p, len %d", chan, skb->len);

	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
		goto drop;

	if (chan->imtu < skb->len)
		goto drop;

	/* Store remote BD_ADDR and PSM for msg_name */
	bacpy(&bt_cb(skb)->l2cap.bdaddr, &hcon->dst);
	bt_cb(skb)->l2cap.psm = psm;

	/* recv() returning 0 means it consumed the skb */
	if (!chan->ops->recv(chan, skb)) {
		l2cap_chan_put(chan);
		return;
	}

drop:
	l2cap_chan_put(chan);
free_skb:
	kfree_skb(skb);
}
6883
/* Parse the basic L2CAP header of a reassembled frame and dispatch it
 * to the signaling, connectionless, LE signaling or data-channel path.
 * Frames arriving before the HCI link is fully up are queued and
 * replayed later by process_pending_rx().
 *
 * Takes ownership of skb.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct hci_conn *hcon = conn->hcon;
	u16 cid, len;
	__le16 psm;

	if (hcon->state != BT_CONNECTED) {
		BT_DBG("queueing pending rx skb");
		skb_queue_tail(&conn->pending_rx, skb);
		return;
	}

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload length */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	/* Since we can't actively block incoming LE connections we must
	 * at least ensure that we ignore incoming data from them.
	 */
	if (hcon->type == LE_LINK &&
	    hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
				   bdaddr_dst_type(hcon))) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless payload starts with the destination PSM */
		psm = get_unaligned((__le16 *) skb->data);
		skb_pull(skb, L2CAP_PSMLEN_SIZE);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_SIGNALING:
		l2cap_le_sig_channel(conn, skb);
		break;

	default:
		l2cap_data_channel(conn, cid, skb);
		break;
	}
}
6938
Johan Hedberg61a939c2014-01-17 20:45:11 +02006939static void process_pending_rx(struct work_struct *work)
6940{
6941 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6942 pending_rx_work);
6943 struct sk_buff *skb;
6944
6945 BT_DBG("");
6946
6947 while ((skb = skb_dequeue(&conn->pending_rx)))
6948 l2cap_recv_frame(conn, skb);
6949}
6950
/* Look up or create the l2cap_conn attached to an HCI connection.
 * Returns the existing conn when one is already attached; otherwise
 * allocates a new one, binds an hci_chan to it and initializes all
 * per-connection state. Returns NULL on allocation failure.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct hci_chan *hchan;

	if (conn)
		return conn;

	hchan = hci_chan_create(hcon);
	if (!hchan)
		return NULL;

	conn = kzalloc(sizeof(*conn), GFP_KERNEL);
	if (!conn) {
		/* Undo the hci_chan allocation on failure */
		hci_chan_del(hchan);
		return NULL;
	}

	kref_init(&conn->ref);
	hcon->l2cap_data = conn;
	/* Hold a reference on the underlying HCI connection */
	conn->hcon = hci_conn_get(hcon);
	conn->hchan = hchan;

	BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);

	/* LE links use the LE MTU when the controller reports one */
	switch (hcon->type) {
	case LE_LINK:
		if (hcon->hdev->le_mtu) {
			conn->mtu = hcon->hdev->le_mtu;
			break;
		}
		/* fall through */
	default:
		conn->mtu = hcon->hdev->acl_mtu;
		break;
	}

	conn->feat_mask = 0;

	/* Advertise locally supported fixed channels */
	conn->local_fixed_chan = L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS;

	if (hcon->type == ACL_LINK &&
	    hci_dev_test_flag(hcon->hdev, HCI_HS_ENABLED))
		conn->local_fixed_chan |= L2CAP_FC_A2MP;

	if (hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED) &&
	    (bredr_sc_enabled(hcon->hdev) ||
	     hci_dev_test_flag(hcon->hdev, HCI_FORCE_BREDR_SMP)))
		conn->local_fixed_chan |= L2CAP_FC_SMP_BREDR;

	mutex_init(&conn->ident_lock);
	mutex_init(&conn->chan_lock);

	INIT_LIST_HEAD(&conn->chan_l);
	INIT_LIST_HEAD(&conn->users);

	INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);

	skb_queue_head_init(&conn->pending_rx);
	INIT_WORK(&conn->pending_rx_work, process_pending_rx);
	INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr);

	conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

	return conn;
}
7017
7018static bool is_valid_psm(u16 psm, u8 dst_type) {
7019 if (!psm)
7020 return false;
7021
7022 if (bdaddr_type_is_le(dst_type))
7023 return (psm <= 0x00ff);
7024
7025 /* PSM must be odd and lsb of upper byte must be 0 */
7026 return ((psm & 0x0101) == 0x0001);
7027}
7028
/* Initiate an outgoing L2CAP channel connection to dst/dst_type on the
 * given PSM (connection-oriented) or CID (fixed channel). Validates the
 * PSM/CID against the channel type and mode, creates or reuses the HCI
 * connection and the l2cap_conn, attaches the channel and kicks off the
 * connect procedure.
 *
 * Returns 0 on success (or when a connect is already in progress) and a
 * negative errno otherwise. Locking: hdev lock around everything, then
 * conn->chan_lock before the channel lock.
 */
int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
		       bdaddr_t *dst, u8 dst_type)
{
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err;

	BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
	       dst_type, __le16_to_cpu(psm));

	hdev = hci_get_route(dst, &chan->src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock(hdev);

	/* Raw channels may connect without a PSM or CID */
	if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
	    chan->chan_type != L2CAP_CHAN_RAW) {
		err = -EINVAL;
		goto done;
	}

	/* Connection-oriented channels require a PSM */
	if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
		err = -EINVAL;
		goto done;
	}

	/* Fixed channels require a CID */
	if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
		err = -EINVAL;
		goto done;
	}

	switch (chan->mode) {
	case L2CAP_MODE_BASIC:
		break;
	case L2CAP_MODE_LE_FLOWCTL:
		l2cap_le_flowctl_init(chan);
		break;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		if (!disable_ertm)
			break;
		/* fall through */
	default:
		err = -EOPNOTSUPP;
		goto done;
	}

	switch (chan->state) {
	case BT_CONNECT:
	case BT_CONNECT2:
	case BT_CONFIG:
		/* Already connecting */
		err = 0;
		goto done;

	case BT_CONNECTED:
		/* Already connected */
		err = -EISCONN;
		goto done;

	case BT_OPEN:
	case BT_BOUND:
		/* Can connect */
		break;

	default:
		err = -EBADFD;
		goto done;
	}

	/* Set destination address and psm */
	bacpy(&chan->dst, dst);
	chan->dst_type = dst_type;

	chan->psm = psm;
	chan->dcid = cid;

	if (bdaddr_type_is_le(dst_type)) {
		u8 role;

		/* Convert from L2CAP channel address type to HCI address type
		 */
		if (dst_type == BDADDR_LE_PUBLIC)
			dst_type = ADDR_LE_DEV_PUBLIC;
		else
			dst_type = ADDR_LE_DEV_RANDOM;

		/* While advertising we must connect as slave */
		if (hci_dev_test_flag(hdev, HCI_ADVERTISING))
			role = HCI_ROLE_SLAVE;
		else
			role = HCI_ROLE_MASTER;

		hcon = hci_connect_le_scan(hdev, dst, dst_type,
					   chan->sec_level,
					   HCI_LE_CONN_TIMEOUT,
					   role);
	} else {
		u8 auth_type = l2cap_get_auth_type(chan);
		hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
	}

	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn) {
		hci_conn_drop(hcon);
		err = -ENOMEM;
		goto done;
	}

	mutex_lock(&conn->chan_lock);
	l2cap_chan_lock(chan);

	/* The requested destination CID must not already be in use */
	if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
		hci_conn_drop(hcon);
		err = -EBUSY;
		goto chan_unlock;
	}

	/* Update source addr of the socket */
	bacpy(&chan->src, &hcon->src);
	chan->src_type = bdaddr_src_type(hcon);

	__l2cap_chan_add(conn, chan);

	/* l2cap_chan_add takes its own ref so we can drop this one */
	hci_conn_drop(hcon);

	l2cap_state_change(chan, BT_CONNECT);
	__set_chan_timer(chan, chan->ops->get_sndtimeo(chan));

	/* Release chan->sport so that it can be reused by other
	 * sockets (as it's only used for listening sockets).
	 */
	write_lock(&chan_list_lock);
	chan->sport = 0;
	write_unlock(&chan_list_lock);

	/* If the link is already up, move the channel forward immediately */
	if (hcon->state == BT_CONNECTED) {
		if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
			__clear_chan_timer(chan);
			if (l2cap_chan_check_security(chan, true))
				l2cap_state_change(chan, BT_CONNECTED);
		} else
			l2cap_do_start(chan);
	}

	err = 0;

chan_unlock:
	l2cap_chan_unlock(chan);
	mutex_unlock(&conn->chan_lock);
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
EXPORT_SYMBOL_GPL(l2cap_chan_connect);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007192
Linus Torvalds1da177e2005-04-16 15:20:36 -07007193/* ---- L2CAP interface with lower layer (HCI) ---- */
7194
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007195int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007196{
7197 int exact = 0, lm1 = 0, lm2 = 0;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007198 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007199
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007200 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007201
7202 /* Find listening sockets and check their link_mode */
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007203 read_lock(&chan_list_lock);
7204 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007205 if (c->state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007206 continue;
7207
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007208 if (!bacmp(&c->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007209 lm1 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007210 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007211 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007212 exact++;
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007213 } else if (!bacmp(&c->src, BDADDR_ANY)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007214 lm2 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007215 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007216 lm2 |= HCI_LM_MASTER;
7217 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007218 }
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007219 read_unlock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007220
7221 return exact ? lm1 : lm2;
7222}
7223
/* Find the next fixed channel in BT_LISTEN state, continue iteration
 * from an existing channel in the list or from the beginning of the
 * global list (by passing NULL as first parameter).
 *
 * The returned channel is held (l2cap_chan_hold); the caller must drop
 * it with l2cap_chan_put(). Matching channels must be fixed-type,
 * listening, bound to hcon's source address (or BDADDR_ANY) and of the
 * same source address type.
 */
static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
						  struct hci_conn *hcon)
{
	u8 src_type = bdaddr_src_type(hcon);

	read_lock(&chan_list_lock);

	/* Resume after c, or start at the list head when c is NULL */
	if (c)
		c = list_next_entry(c, global_l);
	else
		c = list_entry(chan_list.next, typeof(*c), global_l);

	list_for_each_entry_from(c, &chan_list, global_l) {
		if (c->chan_type != L2CAP_CHAN_FIXED)
			continue;
		if (c->state != BT_LISTEN)
			continue;
		if (bacmp(&c->src, &hcon->src) && bacmp(&c->src, BDADDR_ANY))
			continue;
		if (src_type != c->src_type)
			continue;

		/* Hold the channel so it survives after the lock is dropped */
		l2cap_chan_hold(c);
		read_unlock(&chan_list_lock);
		return c;
	}

	read_unlock(&chan_list_lock);

	return NULL;
}
7259
/* HCI callback for a completed (or failed) ACL/LE connection: tear the
 * L2CAP connection down on failure, otherwise create the l2cap_conn,
 * spawn channels for all listening fixed channels and mark the
 * connection ready.
 */
static void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
{
	struct hci_dev *hdev = hcon->hdev;
	struct l2cap_conn *conn;
	struct l2cap_chan *pchan;
	u8 dst_type;

	/* Only ACL and LE links carry L2CAP */
	if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
		return;

	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);

	if (status) {
		l2cap_conn_del(hcon, bt_to_errno(status));
		return;
	}

	conn = l2cap_conn_add(hcon);
	if (!conn)
		return;

	dst_type = bdaddr_dst_type(hcon);

	/* If device is blocked, do not create channels for it */
	if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
		return;

	/* Find fixed channels and notify them of the new connection. We
	 * use multiple individual lookups, continuing each time where
	 * we left off, because the list lock would prevent calling the
	 * potentially sleeping l2cap_chan_lock() function.
	 */
	pchan = l2cap_global_fixed_chan(NULL, hcon);
	while (pchan) {
		struct l2cap_chan *chan, *next;

		/* Client fixed channels should override server ones */
		if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
			goto next;

		l2cap_chan_lock(pchan);
		/* Spawn a per-connection child channel off the listener */
		chan = pchan->ops->new_connection(pchan);
		if (chan) {
			bacpy(&chan->src, &hcon->src);
			bacpy(&chan->dst, &hcon->dst);
			chan->src_type = bdaddr_src_type(hcon);
			chan->dst_type = dst_type;

			__l2cap_chan_add(conn, chan);
		}

		l2cap_chan_unlock(pchan);
next:
		/* Advance before dropping the hold taken by the lookup */
		next = l2cap_global_fixed_chan(pchan, hcon);
		l2cap_chan_put(pchan);
		pchan = next;
	}

	l2cap_conn_ready(conn);
}
7320
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007321int l2cap_disconn_ind(struct hci_conn *hcon)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007322{
7323 struct l2cap_conn *conn = hcon->l2cap_data;
7324
7325 BT_DBG("hcon %p", hcon);
7326
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007327 if (!conn)
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02007328 return HCI_ERROR_REMOTE_USER_TERM;
Marcel Holtmann2950f212009-02-12 14:02:50 +01007329 return conn->disc_reason;
7330}
7331
Johan Hedberg3a6d5762015-02-18 14:53:58 +02007332static void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007333{
Johan Hedberg3a6d5762015-02-18 14:53:58 +02007334 if (hcon->type != ACL_LINK && hcon->type != LE_LINK)
7335 return;
7336
Linus Torvalds1da177e2005-04-16 15:20:36 -07007337 BT_DBG("hcon %p reason %d", hcon, reason);
7338
Joe Perchese1750722011-06-29 18:18:29 -07007339 l2cap_conn_del(hcon, bt_to_errno(reason));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007340}
7341
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007342static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007343{
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03007344 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007345 return;
7346
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007347 if (encrypt == 0x00) {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007348 if (chan->sec_level == BT_SECURITY_MEDIUM) {
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007349 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
Marcel Holtmann7d513e92014-01-15 22:37:40 -08007350 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7351 chan->sec_level == BT_SECURITY_FIPS)
Gustavo F. Padovan0f852722011-05-04 19:42:50 -03007352 l2cap_chan_close(chan, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007353 } else {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007354 if (chan->sec_level == BT_SECURITY_MEDIUM)
Gustavo F. Padovanc9b66672011-05-17 14:59:01 -03007355 __clear_chan_timer(chan);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007356 }
7357}
7358
Johan Hedberg354fe802015-02-18 14:53:56 +02007359static void l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007360{
Marcel Holtmann40be4922008-07-14 20:13:50 +02007361 struct l2cap_conn *conn = hcon->l2cap_data;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03007362 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007363
Marcel Holtmann01394182006-07-03 10:02:46 +02007364 if (!conn)
Johan Hedberg354fe802015-02-18 14:53:56 +02007365 return;
Marcel Holtmann01394182006-07-03 10:02:46 +02007366
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007367 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007368
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007369 mutex_lock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007370
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007371 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007372 l2cap_chan_lock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007373
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007374 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7375 state_to_string(chan->state));
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007376
Johan Hedberg2338a7e2014-01-24 10:35:40 +02007377 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko78eb2f92012-07-19 17:03:47 +03007378 l2cap_chan_unlock(chan);
7379 continue;
7380 }
7381
Johan Hedberg191eb392014-08-07 22:56:45 +03007382 if (!status && encrypt)
7383 chan->sec_level = hcon->sec_level;
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007384
Andrei Emeltchenko96eff462012-11-15 18:14:53 +02007385 if (!__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007386 l2cap_chan_unlock(chan);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01007387 continue;
7388 }
7389
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007390 if (!status && (chan->state == BT_CONNECTED ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01007391 chan->state == BT_CONFIG)) {
Marcel Holtmannd97c8992013-10-14 02:53:54 -07007392 chan->ops->resume(chan);
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007393 l2cap_check_encryption(chan, encrypt);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007394 l2cap_chan_unlock(chan);
Marcel Holtmann9719f8a2008-07-14 20:13:45 +02007395 continue;
7396 }
7397
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007398 if (chan->state == BT_CONNECT) {
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007399 if (!status)
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03007400 l2cap_start_connection(chan);
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007401 else
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007402 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergfa37c1a2014-11-13 10:55:17 +02007403 } else if (chan->state == BT_CONNECT2 &&
7404 chan->mode != L2CAP_MODE_LE_FLOWCTL) {
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007405 struct l2cap_conn_rsp rsp;
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007406 __u16 res, stat;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007407
7408 if (!status) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07007409 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007410 res = L2CAP_CR_PEND;
7411 stat = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08007412 chan->ops->defer(chan);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007413 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007414 l2cap_state_change(chan, BT_CONFIG);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007415 res = L2CAP_CR_SUCCESS;
7416 stat = L2CAP_CS_NO_INFO;
7417 }
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007418 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007419 l2cap_state_change(chan, BT_DISCONN);
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007420 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007421 res = L2CAP_CR_SEC_BLOCK;
7422 stat = L2CAP_CS_NO_INFO;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007423 }
7424
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03007425 rsp.scid = cpu_to_le16(chan->dcid);
7426 rsp.dcid = cpu_to_le16(chan->scid);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007427 rsp.result = cpu_to_le16(res);
7428 rsp.status = cpu_to_le16(stat);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03007429 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01007430 sizeof(rsp), &rsp);
Mat Martineau2d369352012-05-23 14:59:30 -07007431
7432 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7433 res == L2CAP_CR_SUCCESS) {
7434 char buf[128];
7435 set_bit(CONF_REQ_SENT, &chan->conf_state);
7436 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7437 L2CAP_CONF_REQ,
7438 l2cap_build_conf_req(chan, buf),
7439 buf);
7440 chan->num_conf_req++;
7441 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007442 }
7443
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007444 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007445 }
7446
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007447 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007448}
7449
Arron Wang9b4c3332015-06-09 17:47:22 +08007450void l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007451{
7452 struct l2cap_conn *conn = hcon->l2cap_data;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007453 struct l2cap_hdr *hdr;
7454 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007455
Andrei Emeltchenko1d13a252012-10-15 11:58:41 +03007456 /* For AMP controller do not create l2cap conn */
7457 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7458 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007459
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007460 if (!conn)
Claudio Takahasibaf43252013-04-11 13:55:50 -03007461 conn = l2cap_conn_add(hcon);
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007462
7463 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007464 goto drop;
7465
7466 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7467
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007468 switch (flags) {
7469 case ACL_START:
7470 case ACL_START_NO_FLUSH:
7471 case ACL_COMPLETE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007472 if (conn->rx_len) {
7473 BT_ERR("Unexpected start frame (len %d)", skb->len);
7474 kfree_skb(conn->rx_skb);
7475 conn->rx_skb = NULL;
7476 conn->rx_len = 0;
7477 l2cap_conn_unreliable(conn, ECOMM);
7478 }
7479
Andrei Emeltchenkoaae7fe22010-09-15 14:28:43 +03007480 /* Start fragment always begin with Basic L2CAP header */
7481 if (skb->len < L2CAP_HDR_SIZE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007482 BT_ERR("Frame is too short (len %d)", skb->len);
7483 l2cap_conn_unreliable(conn, ECOMM);
7484 goto drop;
7485 }
7486
7487 hdr = (struct l2cap_hdr *) skb->data;
7488 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7489
7490 if (len == skb->len) {
7491 /* Complete frame received */
7492 l2cap_recv_frame(conn, skb);
Arron Wang9b4c3332015-06-09 17:47:22 +08007493 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007494 }
7495
7496 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7497
7498 if (skb->len > len) {
7499 BT_ERR("Frame is too long (len %d, expected len %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007500 skb->len, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007501 l2cap_conn_unreliable(conn, ECOMM);
7502 goto drop;
7503 }
7504
7505 /* Allocate skb for the complete frame (with header) */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03007506 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03007507 if (!conn->rx_skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007508 goto drop;
7509
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007510 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007511 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007512 conn->rx_len = len - skb->len;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007513 break;
7514
7515 case ACL_CONT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007516 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7517
7518 if (!conn->rx_len) {
7519 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7520 l2cap_conn_unreliable(conn, ECOMM);
7521 goto drop;
7522 }
7523
7524 if (skb->len > conn->rx_len) {
7525 BT_ERR("Fragment is too long (len %d, expected %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007526 skb->len, conn->rx_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007527 kfree_skb(conn->rx_skb);
7528 conn->rx_skb = NULL;
7529 conn->rx_len = 0;
7530 l2cap_conn_unreliable(conn, ECOMM);
7531 goto drop;
7532 }
7533
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007534 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007535 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007536 conn->rx_len -= skb->len;
7537
7538 if (!conn->rx_len) {
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007539 /* Complete frame received. l2cap_recv_frame
7540 * takes ownership of the skb so set the global
7541 * rx_skb pointer to NULL first.
7542 */
7543 struct sk_buff *rx_skb = conn->rx_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007544 conn->rx_skb = NULL;
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007545 l2cap_recv_frame(conn, rx_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007546 }
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007547 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007548 }
7549
7550drop:
7551 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007552}
7553
Johan Hedberg354fe802015-02-18 14:53:56 +02007554static struct hci_cb l2cap_cb = {
7555 .name = "L2CAP",
Johan Hedberg539c4962015-02-18 14:53:57 +02007556 .connect_cfm = l2cap_connect_cfm,
Johan Hedberg3a6d5762015-02-18 14:53:58 +02007557 .disconn_cfm = l2cap_disconn_cfm,
Johan Hedberg354fe802015-02-18 14:53:56 +02007558 .security_cfm = l2cap_security_cfm,
7559};
7560
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007561static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007562{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007563 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007564
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007565 read_lock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007566
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007567 list_for_each_entry(c, &chan_list, global_l) {
Marcel Holtmanneeb5a062015-01-14 13:44:21 -08007568 seq_printf(f, "%pMR (%u) %pMR (%u) %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
7569 &c->src, c->src_type, &c->dst, c->dst_type,
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007570 c->state, __le16_to_cpu(c->psm),
7571 c->scid, c->dcid, c->imtu, c->omtu,
7572 c->sec_level, c->mode);
Andrei Emeltchenko61e1b4b2012-01-19 11:19:50 +02007573 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007574
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007575 read_unlock(&chan_list_lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007576
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007577 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007578}
7579
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007580static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7581{
7582 return single_open(file, l2cap_debugfs_show, inode->i_private);
7583}
7584
/* File operations for the "l2cap" debugfs file (single_open based) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the "l2cap" debugfs file; created in l2cap_init() and
 * removed in l2cap_exit().
 */
static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007593
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007594int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007595{
7596 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007597
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007598 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007599 if (err < 0)
7600 return err;
7601
Johan Hedberg354fe802015-02-18 14:53:56 +02007602 hci_register_cb(&l2cap_cb);
7603
Marcel Holtmann1120e4b2013-10-17 17:24:16 -07007604 if (IS_ERR_OR_NULL(bt_debugfs))
7605 return 0;
7606
7607 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7608 NULL, &l2cap_debugfs_fops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007609
Samuel Ortiz40b93972014-05-14 17:53:35 +02007610 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007611 &le_max_credits);
Samuel Ortiz40b93972014-05-14 17:53:35 +02007612 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007613 &le_default_mps);
7614
Linus Torvalds1da177e2005-04-16 15:20:36 -07007615 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007616}
7617
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007618void l2cap_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007619{
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007620 debugfs_remove(l2cap_debugfs);
Johan Hedberg354fe802015-02-18 14:53:56 +02007621 hci_unregister_cb(&l2cap_cb);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007622 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007623}
7624
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03007625module_param(disable_ertm, bool, 0644);
7626MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");