/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
   Copyright (C) 2010 Google Inc.
   Copyright (C) 2011 ProFUSION Embedded Systems
   Copyright (c) 2012 Code Aurora Forum.  All rights reserved.

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth L2CAP core. */

#include <linux/module.h>

#include <linux/debugfs.h>
#include <linux/crc16.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <net/bluetooth/l2cap.h>

#include "smp.h"
#include "a2mp.h"
#include "amp.h"

#define LE_FLOWCTL_MAX_CREDITS 65535

bool disable_ertm;

static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN | L2CAP_FEAT_UCD;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_SIG_BREDR | L2CAP_FC_CONNLESS, };

static LIST_HEAD(chan_list);
static DEFINE_RWLOCK(chan_list_lock);

static u16 le_max_credits = L2CAP_LE_MAX_CREDITS;
static u16 le_default_mps = L2CAP_LE_DEFAULT_MPS;

static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
                                       u8 code, u8 ident, u16 dlen, void *data);
static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
                           void *data);
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err);

static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
                     struct sk_buff_head *skbs, u8 event);

static inline __u8 bdaddr_type(struct hci_conn *hcon, __u8 type)
{
        if (hcon->type == LE_LINK) {
                if (type == ADDR_LE_DEV_PUBLIC)
                        return BDADDR_LE_PUBLIC;
                else
                        return BDADDR_LE_RANDOM;
        }

        return BDADDR_BREDR;
}

/* ---- L2CAP channels ---- */

static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
                                                   u16 cid)
{
        struct l2cap_chan *c;

        list_for_each_entry(c, &conn->chan_l, list) {
                if (c->dcid == cid)
                        return c;
        }
        return NULL;
}

static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn,
                                                   u16 cid)
{
        struct l2cap_chan *c;

        list_for_each_entry(c, &conn->chan_l, list) {
                if (c->scid == cid)
                        return c;
        }
        return NULL;
}

/* Find channel with given SCID.
 * Returns locked channel. */
static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn,
                                                 u16 cid)
{
        struct l2cap_chan *c;

        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_scid(conn, cid);
        if (c)
                l2cap_chan_lock(c);
        mutex_unlock(&conn->chan_lock);

        return c;
}

/* Find channel with given DCID.
 * Returns locked channel.
 */
static struct l2cap_chan *l2cap_get_chan_by_dcid(struct l2cap_conn *conn,
                                                 u16 cid)
{
        struct l2cap_chan *c;

        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_dcid(conn, cid);
        if (c)
                l2cap_chan_lock(c);
        mutex_unlock(&conn->chan_lock);

        return c;
}

static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn,
                                                    u8 ident)
{
        struct l2cap_chan *c;

        list_for_each_entry(c, &conn->chan_l, list) {
                if (c->ident == ident)
                        return c;
        }
        return NULL;
}

static struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn,
                                                  u8 ident)
{
        struct l2cap_chan *c;

        mutex_lock(&conn->chan_lock);
        c = __l2cap_get_chan_by_ident(conn, ident);
        if (c)
                l2cap_chan_lock(c);
        mutex_unlock(&conn->chan_lock);

        return c;
}

static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
{
        struct l2cap_chan *c;

        list_for_each_entry(c, &chan_list, global_l) {
                if (c->sport == psm && !bacmp(&c->src, src))
                        return c;
        }
        return NULL;
}

int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
{
        int err;

        write_lock(&chan_list_lock);

        if (psm && __l2cap_global_chan_by_addr(psm, src)) {
                err = -EADDRINUSE;
                goto done;
        }

        if (psm) {
                chan->psm = psm;
                chan->sport = psm;
                err = 0;
        } else {
                u16 p;

                err = -EINVAL;
                for (p = 0x1001; p < 0x1100; p += 2)
                        if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
                                chan->psm = cpu_to_le16(p);
                                chan->sport = cpu_to_le16(p);
                                err = 0;
                                break;
                        }
        }

done:
        write_unlock(&chan_list_lock);
        return err;
}
EXPORT_SYMBOL_GPL(l2cap_add_psm);

int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
{
        write_lock(&chan_list_lock);

        /* Override the defaults (which are for conn-oriented) */
        chan->omtu = L2CAP_DEFAULT_MTU;
        chan->chan_type = L2CAP_CHAN_FIXED;

        chan->scid = scid;

        write_unlock(&chan_list_lock);

        return 0;
}

static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
{
        u16 cid, dyn_end;

        if (conn->hcon->type == LE_LINK)
                dyn_end = L2CAP_CID_LE_DYN_END;
        else
                dyn_end = L2CAP_CID_DYN_END;

        for (cid = L2CAP_CID_DYN_START; cid < dyn_end; cid++) {
                if (!__l2cap_get_chan_by_scid(conn, cid))
                        return cid;
        }

        return 0;
}

static void l2cap_state_change(struct l2cap_chan *chan, int state)
{
        BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
               state_to_string(state));

        chan->state = state;
        chan->ops->state_change(chan, state, 0);
}

static inline void l2cap_state_change_and_error(struct l2cap_chan *chan,
                                                int state, int err)
{
        chan->state = state;
        chan->ops->state_change(chan, chan->state, err);
}

static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
{
        chan->ops->state_change(chan, chan->state, err);
}

static void __set_retrans_timer(struct l2cap_chan *chan)
{
        if (!delayed_work_pending(&chan->monitor_timer) &&
            chan->retrans_timeout) {
                l2cap_set_timer(chan, &chan->retrans_timer,
                                msecs_to_jiffies(chan->retrans_timeout));
        }
}

static void __set_monitor_timer(struct l2cap_chan *chan)
{
        __clear_retrans_timer(chan);
        if (chan->monitor_timeout) {
                l2cap_set_timer(chan, &chan->monitor_timer,
                                msecs_to_jiffies(chan->monitor_timeout));
        }
}

static struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
                                               u16 seq)
{
        struct sk_buff *skb;

        skb_queue_walk(head, skb) {
                if (bt_cb(skb)->control.txseq == seq)
                        return skb;
        }

        return NULL;
}

/* ---- L2CAP sequence number lists ---- */

/* For ERTM, ordered lists of sequence numbers must be tracked for
 * SREJ requests that are received and for frames that are to be
 * retransmitted. These seq_list functions implement a singly-linked
 * list in an array, where membership in the list can also be checked
 * in constant time. Items can also be added to the tail of the list
 * and removed from the head in constant time, without further memory
 * allocs or frees.
 */
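/* Illustration of the operations implemented below: starting from an
 * empty list, appending seq 5 and then seq 9 gives head = 5, tail = 9,
 * list[5] = 9 and list[9] = L2CAP_SEQ_LIST_TAIL.  The first pop returns
 * 5 and moves head to 9; the second pop returns 9 and resets head and
 * tail to L2CAP_SEQ_LIST_CLEAR.  Membership is simply
 * list[seq & mask] != L2CAP_SEQ_LIST_CLEAR.
 */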

static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
{
        size_t alloc_size, i;

        /* Allocated size is a power of 2 to map sequence numbers
         * (which may be up to 14 bits) into a smaller array that is
         * sized for the negotiated ERTM transmit windows.
         */
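        /* For example, a negotiated transmit window of 63 frames rounds
         * up to 64 u16 entries with a mask of 0x003f.
         */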
        alloc_size = roundup_pow_of_two(size);

        seq_list->list = kmalloc(sizeof(u16) * alloc_size, GFP_KERNEL);
        if (!seq_list->list)
                return -ENOMEM;

        seq_list->mask = alloc_size - 1;
        seq_list->head = L2CAP_SEQ_LIST_CLEAR;
        seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
        for (i = 0; i < alloc_size; i++)
                seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

        return 0;
}

static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
        kfree(seq_list->list);
}

static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
                                           u16 seq)
{
        /* Constant-time check for list membership */
        return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
}

static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
        u16 seq = seq_list->head;
        u16 mask = seq_list->mask;

        seq_list->head = seq_list->list[seq & mask];
        seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

        if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
                seq_list->head = L2CAP_SEQ_LIST_CLEAR;
                seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
        }

        return seq;
}

static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
{
        u16 i;

        if (seq_list->head == L2CAP_SEQ_LIST_CLEAR)
                return;

        for (i = 0; i <= seq_list->mask; i++)
                seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;

        seq_list->head = L2CAP_SEQ_LIST_CLEAR;
        seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
}

static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
        u16 mask = seq_list->mask;

        /* All appends happen in constant time */

        if (seq_list->list[seq & mask] != L2CAP_SEQ_LIST_CLEAR)
                return;

        if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
                seq_list->head = seq;
        else
                seq_list->list[seq_list->tail & mask] = seq;

        seq_list->tail = seq;
        seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
}

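/* Channel timer expiry handler: runs from the workqueue, takes the
 * connection and channel locks and closes the channel, reporting
 * ECONNREFUSED or ETIMEDOUT depending on how far setup had progressed.
 */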
static void l2cap_chan_timeout(struct work_struct *work)
{
        struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
                                               chan_timer.work);
        struct l2cap_conn *conn = chan->conn;
        int reason;

        BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

        mutex_lock(&conn->chan_lock);
        l2cap_chan_lock(chan);

        if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
                reason = ECONNREFUSED;
        else if (chan->state == BT_CONNECT &&
                 chan->sec_level != BT_SECURITY_SDP)
                reason = ECONNREFUSED;
        else
                reason = ETIMEDOUT;

        l2cap_chan_close(chan, reason);

        l2cap_chan_unlock(chan);

        chan->ops->close(chan);
        mutex_unlock(&conn->chan_lock);

        l2cap_chan_put(chan);
}

struct l2cap_chan *l2cap_chan_create(void)
{
        struct l2cap_chan *chan;

        chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
        if (!chan)
                return NULL;

        mutex_init(&chan->lock);

        write_lock(&chan_list_lock);
        list_add(&chan->global_l, &chan_list);
        write_unlock(&chan_list_lock);

        INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);

        chan->state = BT_OPEN;

        kref_init(&chan->kref);

        /* This flag is cleared in l2cap_chan_ready() */
        set_bit(CONF_NOT_COMPLETE, &chan->conf_state);

        BT_DBG("chan %p", chan);

        return chan;
}
EXPORT_SYMBOL_GPL(l2cap_chan_create);

static void l2cap_chan_destroy(struct kref *kref)
{
        struct l2cap_chan *chan = container_of(kref, struct l2cap_chan, kref);

        BT_DBG("chan %p", chan);

        write_lock(&chan_list_lock);
        list_del(&chan->global_l);
        write_unlock(&chan_list_lock);

        kfree(chan);
}

void l2cap_chan_hold(struct l2cap_chan *c)
{
        BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

        kref_get(&c->kref);
}

void l2cap_chan_put(struct l2cap_chan *c)
{
        BT_DBG("chan %p orig refcnt %d", c, atomic_read(&c->kref.refcount));

        kref_put(&c->kref, l2cap_chan_destroy);
}
EXPORT_SYMBOL_GPL(l2cap_chan_put);

void l2cap_chan_set_defaults(struct l2cap_chan *chan)
{
        chan->fcs = L2CAP_FCS_CRC16;
        chan->max_tx = L2CAP_DEFAULT_MAX_TX;
        chan->tx_win = L2CAP_DEFAULT_TX_WINDOW;
        chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
        chan->remote_max_tx = chan->max_tx;
        chan->remote_tx_win = chan->tx_win;
        chan->ack_win = L2CAP_DEFAULT_TX_WINDOW;
        chan->sec_level = BT_SECURITY_LOW;
        chan->flush_to = L2CAP_DEFAULT_FLUSH_TO;
        chan->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
        chan->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
        chan->conf_state = 0;

        set_bit(FLAG_FORCE_ACTIVE, &chan->flags);
}
EXPORT_SYMBOL_GPL(l2cap_chan_set_defaults);

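/* Reset the LE credit-based flow control state of a channel: clear any
 * partially reassembled SDU, start with zero TX credits (the remote
 * grants them), take the initial RX credit count from le_max_credits
 * and use the smaller of the channel MTU and le_default_mps as MPS.
 */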
static void l2cap_le_flowctl_init(struct l2cap_chan *chan)
{
        chan->sdu = NULL;
        chan->sdu_last_frag = NULL;
        chan->sdu_len = 0;
        chan->tx_credits = 0;
        chan->rx_credits = le_max_credits;
        chan->mps = min_t(u16, chan->imtu, le_default_mps);

        skb_queue_head_init(&chan->tx_q);
}

void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
        BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
               __le16_to_cpu(chan->psm), chan->dcid);

        conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;

        chan->conn = conn;

        switch (chan->chan_type) {
        case L2CAP_CHAN_CONN_ORIENTED:
                /* Alloc CID for connection-oriented socket */
                chan->scid = l2cap_alloc_cid(conn);
                if (conn->hcon->type == ACL_LINK)
                        chan->omtu = L2CAP_DEFAULT_MTU;
                break;

        case L2CAP_CHAN_CONN_LESS:
                /* Connectionless socket */
                chan->scid = L2CAP_CID_CONN_LESS;
                chan->dcid = L2CAP_CID_CONN_LESS;
                chan->omtu = L2CAP_DEFAULT_MTU;
                break;

        case L2CAP_CHAN_FIXED:
                /* Caller will set CID and CID specific MTU values */
                break;

        default:
                /* Raw socket can send/recv signalling messages only */
                chan->scid = L2CAP_CID_SIGNALING;
                chan->dcid = L2CAP_CID_SIGNALING;
                chan->omtu = L2CAP_DEFAULT_MTU;
        }

        chan->local_id = L2CAP_BESTEFFORT_ID;
        chan->local_stype = L2CAP_SERV_BESTEFFORT;
        chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
        chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
        chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
        chan->local_flush_to = L2CAP_EFS_DEFAULT_FLUSH_TO;

        l2cap_chan_hold(chan);

        hci_conn_hold(conn->hcon);

        list_add(&chan->list, &conn->chan_l);
}

void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
{
        mutex_lock(&conn->chan_lock);
        __l2cap_chan_add(conn, chan);
        mutex_unlock(&conn->chan_lock);
}

void l2cap_chan_del(struct l2cap_chan *chan, int err)
{
        struct l2cap_conn *conn = chan->conn;

        __clear_chan_timer(chan);

        BT_DBG("chan %p, conn %p, err %d", chan, conn, err);

        chan->ops->teardown(chan, err);

        if (conn) {
                struct amp_mgr *mgr = conn->hcon->amp_mgr;
                /* Delete from channel list */
                list_del(&chan->list);

                l2cap_chan_put(chan);

                chan->conn = NULL;

                if (chan->scid != L2CAP_CID_A2MP)
                        hci_conn_drop(conn->hcon);

                if (mgr && mgr->bredr_chan == chan)
                        mgr->bredr_chan = NULL;
        }

        if (chan->hs_hchan) {
                struct hci_chan *hs_hchan = chan->hs_hchan;

                BT_DBG("chan %p disconnect hs_hchan %p", chan, hs_hchan);
                amp_disconnect_logical_link(hs_hchan);
        }

        if (test_bit(CONF_NOT_COMPLETE, &chan->conf_state))
                return;

        switch (chan->mode) {
        case L2CAP_MODE_BASIC:
                break;

        case L2CAP_MODE_LE_FLOWCTL:
                skb_queue_purge(&chan->tx_q);
                break;

        case L2CAP_MODE_ERTM:
                __clear_retrans_timer(chan);
                __clear_monitor_timer(chan);
                __clear_ack_timer(chan);

                skb_queue_purge(&chan->srej_q);

                l2cap_seq_list_free(&chan->srej_list);
                l2cap_seq_list_free(&chan->retrans_list);

                /* fall through */

        case L2CAP_MODE_STREAMING:
                skb_queue_purge(&chan->tx_q);
                break;
        }

        return;
}
EXPORT_SYMBOL_GPL(l2cap_chan_del);

void l2cap_conn_update_id_addr(struct hci_conn *hcon)
{
        struct l2cap_conn *conn = hcon->l2cap_data;
        struct l2cap_chan *chan;

        mutex_lock(&conn->chan_lock);

        list_for_each_entry(chan, &conn->chan_l, list) {
                l2cap_chan_lock(chan);
                bacpy(&chan->dst, &hcon->dst);
                chan->dst_type = bdaddr_type(hcon, hcon->dst_type);
                l2cap_chan_unlock(chan);
        }

        mutex_unlock(&conn->chan_lock);
}

static void l2cap_chan_le_connect_reject(struct l2cap_chan *chan)
{
        struct l2cap_conn *conn = chan->conn;
        struct l2cap_le_conn_rsp rsp;
        u16 result;

        if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
                result = L2CAP_CR_AUTHORIZATION;
        else
                result = L2CAP_CR_BAD_PSM;

        l2cap_state_change(chan, BT_DISCONN);

        rsp.dcid = cpu_to_le16(chan->scid);
        rsp.mtu = cpu_to_le16(chan->imtu);
        rsp.mps = cpu_to_le16(chan->mps);
        rsp.credits = cpu_to_le16(chan->rx_credits);
        rsp.result = cpu_to_le16(result);

        l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
                       &rsp);
}

static void l2cap_chan_connect_reject(struct l2cap_chan *chan)
{
        struct l2cap_conn *conn = chan->conn;
        struct l2cap_conn_rsp rsp;
        u16 result;

        if (test_bit(FLAG_DEFER_SETUP, &chan->flags))
                result = L2CAP_CR_SEC_BLOCK;
        else
                result = L2CAP_CR_BAD_PSM;

        l2cap_state_change(chan, BT_DISCONN);

        rsp.scid = cpu_to_le16(chan->dcid);
        rsp.dcid = cpu_to_le16(chan->scid);
        rsp.result = cpu_to_le16(result);
        rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);

        l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
}

void l2cap_chan_close(struct l2cap_chan *chan, int reason)
{
        struct l2cap_conn *conn = chan->conn;

        BT_DBG("chan %p state %s", chan, state_to_string(chan->state));

        switch (chan->state) {
        case BT_LISTEN:
                chan->ops->teardown(chan, 0);
                break;

        case BT_CONNECTED:
        case BT_CONFIG:
                if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
                        __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
                        l2cap_send_disconn_req(chan, reason);
                } else
                        l2cap_chan_del(chan, reason);
                break;

        case BT_CONNECT2:
                if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
                        if (conn->hcon->type == ACL_LINK)
                                l2cap_chan_connect_reject(chan);
                        else if (conn->hcon->type == LE_LINK)
                                l2cap_chan_le_connect_reject(chan);
                }

                l2cap_chan_del(chan, reason);
                break;

        case BT_CONNECT:
        case BT_DISCONN:
                l2cap_chan_del(chan, reason);
                break;

        default:
                chan->ops->teardown(chan, 0);
                break;
        }
}
EXPORT_SYMBOL(l2cap_chan_close);

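/* Map the channel type, PSM and requested security level onto the HCI
 * authentication requirement to use when securing the link.  For the
 * SDP and 3DSP PSMs a BT_SECURITY_LOW request is treated as
 * BT_SECURITY_SDP, which never asks for bonding.
 */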
static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
{
        switch (chan->chan_type) {
        case L2CAP_CHAN_RAW:
                switch (chan->sec_level) {
                case BT_SECURITY_HIGH:
                case BT_SECURITY_FIPS:
                        return HCI_AT_DEDICATED_BONDING_MITM;
                case BT_SECURITY_MEDIUM:
                        return HCI_AT_DEDICATED_BONDING;
                default:
                        return HCI_AT_NO_BONDING;
                }
                break;
        case L2CAP_CHAN_CONN_LESS:
                if (chan->psm == cpu_to_le16(L2CAP_PSM_3DSP)) {
                        if (chan->sec_level == BT_SECURITY_LOW)
                                chan->sec_level = BT_SECURITY_SDP;
                }
                if (chan->sec_level == BT_SECURITY_HIGH ||
                    chan->sec_level == BT_SECURITY_FIPS)
                        return HCI_AT_NO_BONDING_MITM;
                else
                        return HCI_AT_NO_BONDING;
                break;
        case L2CAP_CHAN_CONN_ORIENTED:
                if (chan->psm == cpu_to_le16(L2CAP_PSM_SDP)) {
                        if (chan->sec_level == BT_SECURITY_LOW)
                                chan->sec_level = BT_SECURITY_SDP;

                        if (chan->sec_level == BT_SECURITY_HIGH ||
                            chan->sec_level == BT_SECURITY_FIPS)
                                return HCI_AT_NO_BONDING_MITM;
                        else
                                return HCI_AT_NO_BONDING;
                }
                /* fall through */
        default:
                switch (chan->sec_level) {
                case BT_SECURITY_HIGH:
                case BT_SECURITY_FIPS:
                        return HCI_AT_GENERAL_BONDING_MITM;
                case BT_SECURITY_MEDIUM:
                        return HCI_AT_GENERAL_BONDING;
                default:
                        return HCI_AT_NO_BONDING;
                }
                break;
        }
}

/* Service level security */
int l2cap_chan_check_security(struct l2cap_chan *chan, bool initiator)
{
        struct l2cap_conn *conn = chan->conn;
        __u8 auth_type;

        if (conn->hcon->type == LE_LINK)
                return smp_conn_security(conn->hcon, chan->sec_level);

        auth_type = l2cap_get_auth_type(chan);

        return hci_conn_security(conn->hcon, chan->sec_level, auth_type,
                                 initiator);
}

static u8 l2cap_get_ident(struct l2cap_conn *conn)
{
        u8 id;

        /* Get next available identifier.
         * 1 - 128 are used by kernel.
         * 129 - 199 are reserved.
         * 200 - 254 are used by utilities like l2ping, etc.
         */

        mutex_lock(&conn->ident_lock);

        if (++conn->tx_ident > 128)
                conn->tx_ident = 1;

        id = conn->tx_ident;

        mutex_unlock(&conn->ident_lock);

        return id;
}

static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
                           void *data)
{
        struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
        u8 flags;

        BT_DBG("code 0x%2.2x", code);

        if (!skb)
                return;

        if (lmp_no_flush_capable(conn->hcon->hdev))
                flags = ACL_START_NO_FLUSH;
        else
                flags = ACL_START;

        bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
        skb->priority = HCI_PRIO_MAX;

        hci_send_acl(conn->hchan, skb, flags);
}

static bool __chan_is_moving(struct l2cap_chan *chan)
{
        return chan->move_state != L2CAP_MOVE_STABLE &&
               chan->move_state != L2CAP_MOVE_WAIT_PREPARE;
}

static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
{
        struct hci_conn *hcon = chan->conn->hcon;
        u16 flags;

        BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
               skb->priority);

        if (chan->hs_hcon && !__chan_is_moving(chan)) {
                if (chan->hs_hchan)
                        hci_send_acl(chan->hs_hchan, skb, ACL_COMPLETE);
                else
                        kfree_skb(skb);

                return;
        }

        if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
            lmp_no_flush_capable(hcon->hdev))
                flags = ACL_START_NO_FLUSH;
        else
                flags = ACL_START;

        bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
        hci_send_acl(chan->conn->hchan, skb, flags);
}

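/* ERTM control field helpers.  Each channel uses either the 16-bit
 * enhanced control field or, when FLAG_EXT_CTRL is set (extended window
 * size option), the 32-bit extended control field.  The __unpack_*
 * helpers decode a received field into struct l2cap_ctrl and the
 * __pack_* helpers perform the inverse when building outgoing frames.
 */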
static void __unpack_enhanced_control(u16 enh, struct l2cap_ctrl *control)
{
        control->reqseq = (enh & L2CAP_CTRL_REQSEQ) >> L2CAP_CTRL_REQSEQ_SHIFT;
        control->final = (enh & L2CAP_CTRL_FINAL) >> L2CAP_CTRL_FINAL_SHIFT;

        if (enh & L2CAP_CTRL_FRAME_TYPE) {
                /* S-Frame */
                control->sframe = 1;
                control->poll = (enh & L2CAP_CTRL_POLL) >> L2CAP_CTRL_POLL_SHIFT;
                control->super = (enh & L2CAP_CTRL_SUPERVISE) >> L2CAP_CTRL_SUPER_SHIFT;

                control->sar = 0;
                control->txseq = 0;
        } else {
                /* I-Frame */
                control->sframe = 0;
                control->sar = (enh & L2CAP_CTRL_SAR) >> L2CAP_CTRL_SAR_SHIFT;
                control->txseq = (enh & L2CAP_CTRL_TXSEQ) >> L2CAP_CTRL_TXSEQ_SHIFT;

                control->poll = 0;
                control->super = 0;
        }
}

static void __unpack_extended_control(u32 ext, struct l2cap_ctrl *control)
{
        control->reqseq = (ext & L2CAP_EXT_CTRL_REQSEQ) >> L2CAP_EXT_CTRL_REQSEQ_SHIFT;
        control->final = (ext & L2CAP_EXT_CTRL_FINAL) >> L2CAP_EXT_CTRL_FINAL_SHIFT;

        if (ext & L2CAP_EXT_CTRL_FRAME_TYPE) {
                /* S-Frame */
                control->sframe = 1;
                control->poll = (ext & L2CAP_EXT_CTRL_POLL) >> L2CAP_EXT_CTRL_POLL_SHIFT;
                control->super = (ext & L2CAP_EXT_CTRL_SUPERVISE) >> L2CAP_EXT_CTRL_SUPER_SHIFT;

                control->sar = 0;
                control->txseq = 0;
        } else {
                /* I-Frame */
                control->sframe = 0;
                control->sar = (ext & L2CAP_EXT_CTRL_SAR) >> L2CAP_EXT_CTRL_SAR_SHIFT;
                control->txseq = (ext & L2CAP_EXT_CTRL_TXSEQ) >> L2CAP_EXT_CTRL_TXSEQ_SHIFT;

                control->poll = 0;
                control->super = 0;
        }
}

static inline void __unpack_control(struct l2cap_chan *chan,
                                    struct sk_buff *skb)
{
        if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
                __unpack_extended_control(get_unaligned_le32(skb->data),
                                          &bt_cb(skb)->control);
                skb_pull(skb, L2CAP_EXT_CTRL_SIZE);
        } else {
                __unpack_enhanced_control(get_unaligned_le16(skb->data),
                                          &bt_cb(skb)->control);
                skb_pull(skb, L2CAP_ENH_CTRL_SIZE);
        }
}

static u32 __pack_extended_control(struct l2cap_ctrl *control)
{
        u32 packed;

        packed = control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT;
        packed |= control->final << L2CAP_EXT_CTRL_FINAL_SHIFT;

        if (control->sframe) {
                packed |= control->poll << L2CAP_EXT_CTRL_POLL_SHIFT;
                packed |= control->super << L2CAP_EXT_CTRL_SUPER_SHIFT;
                packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
        } else {
                packed |= control->sar << L2CAP_EXT_CTRL_SAR_SHIFT;
                packed |= control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT;
        }

        return packed;
}

static u16 __pack_enhanced_control(struct l2cap_ctrl *control)
{
        u16 packed;

        packed = control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT;
        packed |= control->final << L2CAP_CTRL_FINAL_SHIFT;

        if (control->sframe) {
                packed |= control->poll << L2CAP_CTRL_POLL_SHIFT;
                packed |= control->super << L2CAP_CTRL_SUPER_SHIFT;
                packed |= L2CAP_CTRL_FRAME_TYPE;
        } else {
                packed |= control->sar << L2CAP_CTRL_SAR_SHIFT;
                packed |= control->txseq << L2CAP_CTRL_TXSEQ_SHIFT;
        }

        return packed;
}

static inline void __pack_control(struct l2cap_chan *chan,
                                  struct l2cap_ctrl *control,
                                  struct sk_buff *skb)
{
        if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
                put_unaligned_le32(__pack_extended_control(control),
                                   skb->data + L2CAP_HDR_SIZE);
        } else {
                put_unaligned_le16(__pack_enhanced_control(control),
                                   skb->data + L2CAP_HDR_SIZE);
        }
}

static inline unsigned int __ertm_hdr_size(struct l2cap_chan *chan)
{
        if (test_bit(FLAG_EXT_CTRL, &chan->flags))
                return L2CAP_EXT_HDR_SIZE;
        else
                return L2CAP_ENH_HDR_SIZE;
}

static struct sk_buff *l2cap_create_sframe_pdu(struct l2cap_chan *chan,
                                               u32 control)
{
        struct sk_buff *skb;
        struct l2cap_hdr *lh;
        int hlen = __ertm_hdr_size(chan);

        if (chan->fcs == L2CAP_FCS_CRC16)
                hlen += L2CAP_FCS_SIZE;

        skb = bt_skb_alloc(hlen, GFP_KERNEL);

        if (!skb)
                return ERR_PTR(-ENOMEM);

        lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
        lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
        lh->cid = cpu_to_le16(chan->dcid);

        if (test_bit(FLAG_EXT_CTRL, &chan->flags))
                put_unaligned_le32(control, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
        else
                put_unaligned_le16(control, skb_put(skb, L2CAP_ENH_CTRL_SIZE));

        if (chan->fcs == L2CAP_FCS_CRC16) {
                u16 fcs = crc16(0, (u8 *)skb->data, skb->len);
                put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
        }

        skb->priority = HCI_PRIO_MAX;
        return skb;
}

static void l2cap_send_sframe(struct l2cap_chan *chan,
                              struct l2cap_ctrl *control)
{
        struct sk_buff *skb;
        u32 control_field;

        BT_DBG("chan %p, control %p", chan, control);

        if (!control->sframe)
                return;

        if (__chan_is_moving(chan))
                return;

        if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state) &&
            !control->poll)
                control->final = 1;

        if (control->super == L2CAP_SUPER_RR)
                clear_bit(CONN_RNR_SENT, &chan->conn_state);
        else if (control->super == L2CAP_SUPER_RNR)
                set_bit(CONN_RNR_SENT, &chan->conn_state);

        if (control->super != L2CAP_SUPER_SREJ) {
                chan->last_acked_seq = control->reqseq;
                __clear_ack_timer(chan);
        }

        BT_DBG("reqseq %d, final %d, poll %d, super %d", control->reqseq,
               control->final, control->poll, control->super);

        if (test_bit(FLAG_EXT_CTRL, &chan->flags))
                control_field = __pack_extended_control(control);
        else
                control_field = __pack_enhanced_control(control);

        skb = l2cap_create_sframe_pdu(chan, control_field);
        if (!IS_ERR(skb))
                l2cap_do_send(chan, skb);
}

static void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, bool poll)
{
        struct l2cap_ctrl control;

        BT_DBG("chan %p, poll %d", chan, poll);

        memset(&control, 0, sizeof(control));
        control.sframe = 1;
        control.poll = poll;

        if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
                control.super = L2CAP_SUPER_RNR;
        else
                control.super = L2CAP_SUPER_RR;

        control.reqseq = chan->buffer_seq;
        l2cap_send_sframe(chan, &control);
}

static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
{
        if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
                return true;

        return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
}

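/* A channel may be set up on a high-speed (AMP) controller only if the
 * connection has high speed enabled, the remote advertises the A2MP
 * fixed channel, at least one powered-up AMP controller is present and
 * the channel policy prefers AMP.
 */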
static bool __amp_capable(struct l2cap_chan *chan)
{
        struct l2cap_conn *conn = chan->conn;
        struct hci_dev *hdev;
        bool amp_available = false;

        if (!conn->hs_enabled)
                return false;

        if (!(conn->fixed_chan_mask & L2CAP_FC_A2MP))
                return false;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (hdev->amp_type != AMP_TYPE_BREDR &&
                    test_bit(HCI_UP, &hdev->flags)) {
                        amp_available = true;
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);

        if (chan->chan_policy == BT_CHANNEL_POLICY_AMP_PREFERRED)
                return amp_available;

        return false;
}

static bool l2cap_check_efs(struct l2cap_chan *chan)
{
        /* Check EFS parameters */
        return true;
}

void l2cap_send_conn_req(struct l2cap_chan *chan)
{
        struct l2cap_conn *conn = chan->conn;
        struct l2cap_conn_req req;

        req.scid = cpu_to_le16(chan->scid);
        req.psm = chan->psm;

        chan->ident = l2cap_get_ident(conn);

        set_bit(CONF_CONNECT_PEND, &chan->conf_state);

        l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
}

static void l2cap_send_create_chan_req(struct l2cap_chan *chan, u8 amp_id)
{
        struct l2cap_create_chan_req req;
        req.scid = cpu_to_le16(chan->scid);
        req.psm = chan->psm;
        req.amp_id = amp_id;

        chan->ident = l2cap_get_ident(chan->conn);

        l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_REQ,
                       sizeof(req), &req);
}

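/* Prepare an ERTM channel for a move to another controller: stop the
 * ERTM timers, reset the retry bookkeeping of frames that have already
 * been transmitted, clear the SREJ/retransmit lists and hold off
 * further transmission until the move completes.
 */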
static void l2cap_move_setup(struct l2cap_chan *chan)
{
        struct sk_buff *skb;

        BT_DBG("chan %p", chan);

        if (chan->mode != L2CAP_MODE_ERTM)
                return;

        __clear_retrans_timer(chan);
        __clear_monitor_timer(chan);
        __clear_ack_timer(chan);

        chan->retry_count = 0;
        skb_queue_walk(&chan->tx_q, skb) {
                if (bt_cb(skb)->control.retries)
                        bt_cb(skb)->control.retries = 1;
                else
                        break;
        }

        chan->expected_tx_seq = chan->buffer_seq;

        clear_bit(CONN_REJ_ACT, &chan->conn_state);
        clear_bit(CONN_SREJ_ACT, &chan->conn_state);
        l2cap_seq_list_clear(&chan->retrans_list);
        l2cap_seq_list_clear(&chan->srej_list);
        skb_queue_purge(&chan->srej_q);

        chan->tx_state = L2CAP_TX_STATE_XMIT;
        chan->rx_state = L2CAP_RX_STATE_MOVE;

        set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
}

static void l2cap_move_done(struct l2cap_chan *chan)
{
        u8 move_role = chan->move_role;
        BT_DBG("chan %p", chan);

        chan->move_state = L2CAP_MOVE_STABLE;
        chan->move_role = L2CAP_MOVE_ROLE_NONE;

        if (chan->mode != L2CAP_MODE_ERTM)
                return;

        switch (move_role) {
        case L2CAP_MOVE_ROLE_INITIATOR:
                l2cap_tx(chan, NULL, NULL, L2CAP_EV_EXPLICIT_POLL);
                chan->rx_state = L2CAP_RX_STATE_WAIT_F;
                break;
        case L2CAP_MOVE_ROLE_RESPONDER:
                chan->rx_state = L2CAP_RX_STATE_WAIT_P;
                break;
        }
}

static void l2cap_chan_ready(struct l2cap_chan *chan)
{
        /* This clears all conf flags, including CONF_NOT_COMPLETE */
        chan->conf_state = 0;
        __clear_chan_timer(chan);

        if (chan->mode == L2CAP_MODE_LE_FLOWCTL && !chan->tx_credits)
                chan->ops->suspend(chan);

        chan->state = BT_CONNECTED;

        chan->ops->ready(chan);
}

static void l2cap_le_connect(struct l2cap_chan *chan)
{
        struct l2cap_conn *conn = chan->conn;
        struct l2cap_le_conn_req req;

        if (test_and_set_bit(FLAG_LE_CONN_REQ_SENT, &chan->flags))
                return;

        req.psm = chan->psm;
        req.scid = cpu_to_le16(chan->scid);
        req.mtu = cpu_to_le16(chan->imtu);
        req.mps = cpu_to_le16(chan->mps);
        req.credits = cpu_to_le16(chan->rx_credits);

        chan->ident = l2cap_get_ident(conn);

        l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_REQ,
                       sizeof(req), &req);
}

static void l2cap_le_start(struct l2cap_chan *chan)
{
        struct l2cap_conn *conn = chan->conn;

        if (!smp_conn_security(conn->hcon, chan->sec_level))
                return;

        if (!chan->psm) {
                l2cap_chan_ready(chan);
                return;
        }

        if (chan->state == BT_CONNECT)
                l2cap_le_connect(chan);
}

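/* Kick off channel establishment in the way that fits the link: AMP
 * discovery when a high-speed channel is preferred and available, an
 * LE credit-based connection request on LE links, or a standard L2CAP
 * Connect Request otherwise.
 */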
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001264static void l2cap_start_connection(struct l2cap_chan *chan)
1265{
1266 if (__amp_capable(chan)) {
1267 BT_DBG("chan %p AMP capable: discover AMPs", chan);
1268 a2mp_discover_amp(chan);
Johan Hedbergf1496de2013-05-13 14:15:56 +03001269 } else if (chan->conn->hcon->type == LE_LINK) {
1270 l2cap_le_start(chan);
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001271 } else {
1272 l2cap_send_conn_req(chan);
1273 }
1274}
1275
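/* Start channel establishment. On BR/EDR this may first require the
 * information (feature mask) exchange; the connection request is only
 * sent once that exchange is done and the security check has passed.
 */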
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001276static void l2cap_do_start(struct l2cap_chan *chan)
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001277{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03001278 struct l2cap_conn *conn = chan->conn;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001279
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001280 if (conn->hcon->type == LE_LINK) {
Johan Hedberg96ac34f2013-05-13 11:15:07 +03001281 l2cap_le_start(chan);
Vinicius Costa Gomes9f0caeb2012-04-20 15:46:08 -03001282 return;
1283 }
1284
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001285 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
Marcel Holtmann984947d2009-02-06 23:35:19 +01001286 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
1287 return;
1288
Johan Hedberge7cafc42014-07-17 15:35:38 +03001289 if (l2cap_chan_check_security(chan, true) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01001290 __l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001291 l2cap_start_connection(chan);
1292 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001293 } else {
1294 struct l2cap_info_req req;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001295 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001296
1297 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
1298 conn->info_ident = l2cap_get_ident(conn);
1299
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08001300 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001301
Gustavo Padovan2d792812012-10-06 10:07:01 +01001302 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
1303 sizeof(req), &req);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001304 }
1305}
1306
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001307static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
1308{
1309 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03001310 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -03001311 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
1312
1313 switch (mode) {
1314 case L2CAP_MODE_ERTM:
1315 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
1316 case L2CAP_MODE_STREAMING:
1317 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
1318 default:
1319 return 0x00;
1320 }
1321}
1322
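/* Tear down a channel: stop the ERTM timers, send a disconnection request
 * (A2MP channels only change state) and move the channel to BT_DISCONN
 * with the given error.
 */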
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001323static void l2cap_send_disconn_req(struct l2cap_chan *chan, int err)
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001324{
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001325 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001326 struct l2cap_disconn_req req;
1327
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001328 if (!conn)
1329 return;
1330
Andrei Emeltchenkoaad3d0e2012-09-06 15:05:42 +03001331 if (chan->mode == L2CAP_MODE_ERTM && chan->state == BT_CONNECTED) {
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001332 __clear_retrans_timer(chan);
1333 __clear_monitor_timer(chan);
1334 __clear_ack_timer(chan);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001335 }
1336
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001337 if (chan->scid == L2CAP_CID_A2MP) {
Gustavo Padovand1177732012-10-06 11:47:38 +01001338 l2cap_state_change(chan, BT_DISCONN);
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001339 return;
1340 }
1341
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001342 req.dcid = cpu_to_le16(chan->dcid);
1343 req.scid = cpu_to_le16(chan->scid);
Gustavo Padovan2d792812012-10-06 10:07:01 +01001344 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_DISCONN_REQ,
1345 sizeof(req), &req);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001346
Gustavo Padovanf8e73012013-10-15 19:24:46 -03001347 l2cap_state_change_and_error(chan, BT_DISCONN, err);
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -03001348}
1349
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350/* ---- L2CAP connections ---- */
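/* Walk all channels on a connection once it is usable: kick off outgoing
 * connection requests for channels in BT_CONNECT and answer incoming ones
 * in BT_CONNECT2 with success, pending authorization or pending
 * authentication.
 */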
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001351static void l2cap_conn_start(struct l2cap_conn *conn)
1352{
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001353 struct l2cap_chan *chan, *tmp;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001354
1355 BT_DBG("conn %p", conn);
1356
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001357 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001358
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001359 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001360 l2cap_chan_lock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001361
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03001362 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001363 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001364 continue;
1365 }
1366
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001367 if (chan->state == BT_CONNECT) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03001368 if (!l2cap_chan_check_security(chan, true) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001369 !__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001370 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001371 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02001372 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001373
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001374 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
Gustavo Padovan2d792812012-10-06 10:07:01 +01001375 && test_bit(CONF_STATE2_DEVICE,
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001376 &chan->conf_state)) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001377 l2cap_chan_close(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001378 l2cap_chan_unlock(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001379 continue;
1380 }
1381
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03001382 l2cap_start_connection(chan);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -03001383
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001384 } else if (chan->state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001385 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001386 char buf[128];
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03001387 rsp.scid = cpu_to_le16(chan->dcid);
1388 rsp.dcid = cpu_to_le16(chan->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001389
Johan Hedberge7cafc42014-07-17 15:35:38 +03001390 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07001391 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001392 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1393 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08001394 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001395
1396 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02001397 l2cap_state_change(chan, BT_CONFIG);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001398 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1399 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01001400 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001401 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07001402 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
1403 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001404 }
1405
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001406 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001407 sizeof(rsp), &rsp);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001408
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001409 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01001410 rsp.result != L2CAP_CR_SUCCESS) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001411 l2cap_chan_unlock(chan);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001412 continue;
1413 }
1414
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03001415 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03001416 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001417 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03001418 chan->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001419 }
1420
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001421 l2cap_chan_unlock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001422 }
1423
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001424 mutex_unlock(&conn->chan_lock);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001425}
1426
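/* LE link is up: trigger SMP security for outgoing connections (pairing
 * may have no socket attached) and, in the slave role, request a
 * connection parameter update if the current interval is outside the
 * configured min/max range.
 */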
Ville Tervob62f3282011-02-10 22:38:50 -03001427static void l2cap_le_conn_ready(struct l2cap_conn *conn)
1428{
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001429 struct hci_conn *hcon = conn->hcon;
Johan Hedbergdcc36c12014-07-09 12:59:13 +03001430 struct hci_dev *hdev = hcon->hdev;
Ville Tervob62f3282011-02-10 22:38:50 -03001431
Johan Hedberge760ec12014-08-07 22:56:47 +03001432 BT_DBG("%s conn %p", hdev->name, conn);
Ville Tervob62f3282011-02-10 22:38:50 -03001433
Johan Hedberge760ec12014-08-07 22:56:47 +03001434 /* For outgoing pairing which doesn't necessarily have an
1435 * associated socket (e.g. mgmt_pair_device).
1436 */
1437 if (hcon->out)
1438 smp_conn_security(hcon, hcon->pending_sec_level);
Marcel Holtmanncc8dba22013-10-18 03:43:01 -07001439
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001440 /* For LE slave connections, make sure the connection interval
1441 * is in the range of the minimum and maximum interval that has
1442 * been configured for this connection. If not, then trigger
1443 * the connection update procedure.
1444 */
Johan Hedberg40bef302014-07-16 11:42:27 +03001445 if (hcon->role == HCI_ROLE_SLAVE &&
Marcel Holtmann80afeb62014-06-23 12:18:51 +02001446 (hcon->le_conn_interval < hcon->le_conn_min_interval ||
1447 hcon->le_conn_interval > hcon->le_conn_max_interval)) {
1448 struct l2cap_conn_param_update_req req;
1449
1450 req.min = cpu_to_le16(hcon->le_conn_min_interval);
1451 req.max = cpu_to_le16(hcon->le_conn_max_interval);
1452 req.latency = cpu_to_le16(hcon->le_conn_latency);
1453 req.to_multiplier = cpu_to_le16(hcon->le_supv_timeout);
1454
1455 l2cap_send_cmd(conn, l2cap_get_ident(conn),
1456 L2CAP_CONN_PARAM_UPDATE_REQ, sizeof(req), &req);
1457 }
Ville Tervob62f3282011-02-10 22:38:50 -03001458}
1459
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001460static void l2cap_conn_ready(struct l2cap_conn *conn)
1461{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001462 struct l2cap_chan *chan;
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001463 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001464
1465 BT_DBG("conn %p", conn);
1466
Johan Hedberge760ec12014-08-07 22:56:47 +03001467 mutex_lock(&conn->chan_lock);
1468
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001469 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03001470
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001471 l2cap_chan_lock(chan);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02001472
Johan Hedberg2338a7e2014-01-24 10:35:40 +02001473 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko416fa752012-05-29 13:59:16 +03001474 l2cap_chan_unlock(chan);
1475 continue;
1476 }
1477
Vinicius Costa Gomescc110922012-08-23 21:32:43 -03001478 if (hcon->type == LE_LINK) {
Johan Hedbergf1496de2013-05-13 14:15:56 +03001479 l2cap_le_start(chan);
Vinicius Costa Gomes63128452011-06-17 22:46:26 -03001480 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
Gustavo Padovan74e75742013-10-15 19:24:51 -03001481 l2cap_chan_ready(chan);
Anderson Brigliab501d6a2011-06-07 18:46:31 -03001482
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001483 } else if (chan->state == BT_CONNECT) {
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03001484 l2cap_do_start(chan);
Gustavo Padovan1c244f72012-12-07 03:29:10 -02001485 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001486
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001487 l2cap_chan_unlock(chan);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001488 }
1489
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001490 mutex_unlock(&conn->chan_lock);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001491
Johan Hedberg79a05722014-08-08 09:28:04 +03001492 if (hcon->type == LE_LINK)
1493 l2cap_le_conn_ready(conn);
1494
Johan Hedberg61a939c2014-01-17 20:45:11 +02001495 queue_work(hcon->hdev->workqueue, &conn->pending_rx_work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001496}
1497
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001498/* Notify sockets that we cannot guarantee reliability anymore */
1499static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1500{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03001501 struct l2cap_chan *chan;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001502
1503 BT_DBG("conn %p", conn);
1504
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001505 mutex_lock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001506
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001507 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenkoecf61bd2011-10-11 14:04:32 +03001508 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
Gustavo Padovan1d8b1fd2012-10-06 11:34:52 +01001509 l2cap_chan_set_err(chan, err);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001510 }
1511
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001512 mutex_unlock(&conn->chan_lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001513}
1514
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001515static void l2cap_info_timeout(struct work_struct *work)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001516{
Gustavo F. Padovanf878fca2011-12-15 01:16:14 -02001517 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01001518 info_timer.work);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001519
Marcel Holtmann984947d2009-02-06 23:35:19 +01001520 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001521 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001522
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001523 l2cap_conn_start(conn);
1524}
1525
David Herrmann2c8e1412013-04-06 20:28:45 +02001526/*
1527 * l2cap_user
1528 * External modules can register l2cap_user objects on l2cap_conn. The ->probe
1529 * callback is called during registration. The ->remove callback is called
1530 * during unregistration.
1531 * An l2cap_user object is unregistered either explicitly or when the
1532 * underlying l2cap_conn object is deleted. This guarantees that l2cap->hcon,
1533 * l2cap->hchan, .. are valid as long as the remove callback hasn't been called.
1534 * External modules must own a reference to the l2cap_conn object if they intend
1535 * to call l2cap_unregister_user(). The l2cap_conn object might get destroyed at
1536 * any time if they don't.
1537 */
1538
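/* Illustrative usage sketch (not part of this file): the names below are
 * hypothetical and the callback signatures are inferred from the probe()
 * and remove() calls in l2cap_register_user() and l2cap_unregister_user()
 * below.
 *
 *	static int my_probe(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *		// conn->hcon and conn->hchan stay valid until remove() runs
 *		return 0;
 *	}
 *
 *	static void my_remove(struct l2cap_conn *conn, struct l2cap_user *user)
 *	{
 *	}
 *
 *	static struct l2cap_user my_user = {
 *		.probe  = my_probe,
 *		.remove = my_remove,
 *	};
 *
 *	int err = l2cap_register_user(conn, &my_user);
 *	...
 *	l2cap_unregister_user(conn, &my_user);
 */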
1539int l2cap_register_user(struct l2cap_conn *conn, struct l2cap_user *user)
1540{
1541 struct hci_dev *hdev = conn->hcon->hdev;
1542 int ret;
1543
1544 /* We need to check whether l2cap_conn is registered. If it is not, we
1545 * must not register the l2cap_user. l2cap_conn_del() unregisters
1546 * l2cap_conn objects, but doesn't provide its own locking. Instead, it
1547 * relies on the parent hci_conn object to be locked. This itself relies
1548 * on the hci_dev object to be locked. So we must lock the hci device
1549 * here, too. */
1550
1551 hci_dev_lock(hdev);
1552
1553 if (user->list.next || user->list.prev) {
1554 ret = -EINVAL;
1555 goto out_unlock;
1556 }
1557
1558 /* conn->hchan is NULL after l2cap_conn_del() was called */
1559 if (!conn->hchan) {
1560 ret = -ENODEV;
1561 goto out_unlock;
1562 }
1563
1564 ret = user->probe(conn, user);
1565 if (ret)
1566 goto out_unlock;
1567
1568 list_add(&user->list, &conn->users);
1569 ret = 0;
1570
1571out_unlock:
1572 hci_dev_unlock(hdev);
1573 return ret;
1574}
1575EXPORT_SYMBOL(l2cap_register_user);
1576
1577void l2cap_unregister_user(struct l2cap_conn *conn, struct l2cap_user *user)
1578{
1579 struct hci_dev *hdev = conn->hcon->hdev;
1580
1581 hci_dev_lock(hdev);
1582
1583 if (!user->list.next || !user->list.prev)
1584 goto out_unlock;
1585
1586 list_del(&user->list);
1587 user->list.next = NULL;
1588 user->list.prev = NULL;
1589 user->remove(conn, user);
1590
1591out_unlock:
1592 hci_dev_unlock(hdev);
1593}
1594EXPORT_SYMBOL(l2cap_unregister_user);
1595
1596static void l2cap_unregister_all_users(struct l2cap_conn *conn)
1597{
1598 struct l2cap_user *user;
1599
1600 while (!list_empty(&conn->users)) {
1601 user = list_first_entry(&conn->users, struct l2cap_user, list);
1602 list_del(&user->list);
1603 user->list.next = NULL;
1604 user->list.prev = NULL;
1605 user->remove(conn, user);
1606 }
1607}
1608
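/* Tear down an l2cap_conn: drop queued frames and pending RX work,
 * unregister users, close and release every channel, delete the HCI
 * channel and drop the connection reference.
 */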
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001609static void l2cap_conn_del(struct hci_conn *hcon, int err)
1610{
1611 struct l2cap_conn *conn = hcon->l2cap_data;
1612 struct l2cap_chan *chan, *l;
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001613
1614 if (!conn)
1615 return;
1616
1617 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1618
1619 kfree_skb(conn->rx_skb);
1620
Johan Hedberg61a939c2014-01-17 20:45:11 +02001621 skb_queue_purge(&conn->pending_rx);
Jukka Taimisto7ab56c32014-06-12 10:15:13 +00001622
1623 /* We can not call flush_work(&conn->pending_rx_work) here since we
1624 * might block if we are running on a worker from the same workqueue
1625 * pending_rx_work is waiting on.
1626 */
1627 if (work_pending(&conn->pending_rx_work))
1628 cancel_work_sync(&conn->pending_rx_work);
Johan Hedberg61a939c2014-01-17 20:45:11 +02001629
Johan Hedbergdec5b492014-08-11 22:06:37 +03001630 if (work_pending(&conn->disconn_work))
1631 cancel_work_sync(&conn->disconn_work);
1632
David Herrmann2c8e1412013-04-06 20:28:45 +02001633 l2cap_unregister_all_users(conn);
1634
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001635 mutex_lock(&conn->chan_lock);
1636
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001637 /* Kill channels */
1638 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
Mat Martineau61d6ef32012-04-27 16:50:50 -07001639 l2cap_chan_hold(chan);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001640 l2cap_chan_lock(chan);
1641
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001642 l2cap_chan_del(chan, err);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001643
1644 l2cap_chan_unlock(chan);
1645
Gustavo Padovan80b98022012-05-27 22:27:51 -03001646 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07001647 l2cap_chan_put(chan);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001648 }
1649
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02001650 mutex_unlock(&conn->chan_lock);
1651
Luiz Augusto von Dentz73d80de2011-11-02 15:52:01 +02001652 hci_chan_del(conn->hchan);
1653
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001654 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
Ulisses Furquim127074b2012-01-30 18:26:29 -02001655 cancel_delayed_work_sync(&conn->info_timer);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001656
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001657 hcon->l2cap_data = NULL;
David Herrmann9c903e32013-04-06 20:28:44 +02001658 conn->hchan = NULL;
1659 l2cap_conn_put(conn);
Vinicius Costa Gomes5d3de7d2011-06-14 13:37:41 -03001660}
1661
Johan Hedbergdec5b492014-08-11 22:06:37 +03001662static void disconn_work(struct work_struct *work)
1663{
1664 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1665 disconn_work);
1666
1667 BT_DBG("conn %p", conn);
1668
1669 l2cap_conn_del(conn->hcon, conn->disconn_err);
1670}
1671
1672void l2cap_conn_shutdown(struct l2cap_conn *conn, int err)
1673{
1674 struct hci_dev *hdev = conn->hcon->hdev;
1675
1676 BT_DBG("conn %p err %d", conn, err);
1677
1678 conn->disconn_err = err;
1679 queue_work(hdev->workqueue, &conn->disconn_work);
1680}
1681
David Herrmann9c903e32013-04-06 20:28:44 +02001682static void l2cap_conn_free(struct kref *ref)
1683{
1684 struct l2cap_conn *conn = container_of(ref, struct l2cap_conn, ref);
1685
1686 hci_conn_put(conn->hcon);
1687 kfree(conn);
1688}
1689
1690void l2cap_conn_get(struct l2cap_conn *conn)
1691{
1692 kref_get(&conn->ref);
1693}
1694EXPORT_SYMBOL(l2cap_conn_get);
1695
1696void l2cap_conn_put(struct l2cap_conn *conn)
1697{
1698 kref_put(&conn->ref, l2cap_conn_free);
1699}
1700EXPORT_SYMBOL(l2cap_conn_put);
1701
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703
Ido Yarivc2287682012-04-20 15:46:07 -03001704/* Find channel with psm and source / destination bdaddr.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 * Returns closest match.
1706 */
Ido Yarivc2287682012-04-20 15:46:07 -03001707static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm,
1708 bdaddr_t *src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001709 bdaddr_t *dst,
1710 u8 link_type)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001712 struct l2cap_chan *c, *c1 = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001713
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001714 read_lock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001715
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001716 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001717 if (state && c->state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001718 continue;
1719
Johan Hedbergbf20fd42013-05-14 13:23:13 +03001720 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
1721 continue;
1722
1723 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
1724 continue;
1725
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001726 if (c->psm == psm) {
Ido Yarivc2287682012-04-20 15:46:07 -03001727 int src_match, dst_match;
1728 int src_any, dst_any;
1729
Linus Torvalds1da177e2005-04-16 15:20:36 -07001730 /* Exact match. */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001731 src_match = !bacmp(&c->src, src);
1732 dst_match = !bacmp(&c->dst, dst);
Ido Yarivc2287682012-04-20 15:46:07 -03001733 if (src_match && dst_match) {
Johan Hedberga24cce12014-08-07 22:56:42 +03001734 l2cap_chan_hold(c);
Johannes Berga7567b22011-06-01 08:29:54 +02001735 read_unlock(&chan_list_lock);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001736 return c;
1737 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738
1739 /* Closest match */
Marcel Holtmann7eafc592013-10-13 08:12:47 -07001740 src_any = !bacmp(&c->src, BDADDR_ANY);
1741 dst_any = !bacmp(&c->dst, BDADDR_ANY);
Ido Yarivc2287682012-04-20 15:46:07 -03001742 if ((src_match && dst_any) || (src_any && dst_match) ||
1743 (src_any && dst_any))
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001744 c1 = c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745 }
1746 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001747
Johan Hedberga24cce12014-08-07 22:56:42 +03001748 if (c1)
1749 l2cap_chan_hold(c1);
1750
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001751 read_unlock(&chan_list_lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001752
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03001753 return c1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001754}
1755
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001756static void l2cap_monitor_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001757{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001758 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001759 monitor_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001760
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03001761 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001762
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001763 l2cap_chan_lock(chan);
1764
Mat Martineau80909e02012-05-17 20:53:50 -07001765 if (!chan->conn) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001766 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001767 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001768 return;
1769 }
1770
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001771 l2cap_tx(chan, NULL, NULL, L2CAP_EV_MONITOR_TO);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001772
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001773 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001774 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001775}
1776
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001777static void l2cap_retrans_timeout(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001778{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03001779 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau4239d162012-05-17 20:53:49 -07001780 retrans_timer.work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001781
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03001782 BT_DBG("chan %p", chan);
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03001783
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001784 l2cap_chan_lock(chan);
1785
Mat Martineau80909e02012-05-17 20:53:50 -07001786 if (!chan->conn) {
1787 l2cap_chan_unlock(chan);
1788 l2cap_chan_put(chan);
1789 return;
1790 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001791
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03001792 l2cap_tx(chan, NULL, NULL, L2CAP_EV_RETRANS_TO);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02001793 l2cap_chan_unlock(chan);
Andrei Emeltchenko8d7e1c72012-03-23 09:42:15 +02001794 l2cap_chan_put(chan);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001795}
1796
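/* Transmit a segmented SDU in streaming mode: every frame gets a txseq
 * and, if enabled, an FCS and is sent immediately, since streaming mode
 * keeps no copies for retransmission. Skipped while a channel move is in
 * progress.
 */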
Gustavo Padovand6603662012-05-21 13:58:22 -03001797static void l2cap_streaming_send(struct l2cap_chan *chan,
1798 struct sk_buff_head *skbs)
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001799{
Gustavo F. Padovanccbb84a2010-08-30 18:44:44 -03001800 struct sk_buff *skb;
Mat Martineau37339372012-05-17 20:53:33 -07001801 struct l2cap_ctrl *control;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001802
Mat Martineau37339372012-05-17 20:53:33 -07001803 BT_DBG("chan %p, skbs %p", chan, skbs);
1804
Mat Martineaub99e13a2012-10-23 15:24:19 -07001805 if (__chan_is_moving(chan))
1806 return;
1807
Mat Martineau37339372012-05-17 20:53:33 -07001808 skb_queue_splice_tail_init(skbs, &chan->tx_q);
1809
1810 while (!skb_queue_empty(&chan->tx_q)) {
1811
1812 skb = skb_dequeue(&chan->tx_q);
1813
1814 bt_cb(skb)->control.retries = 1;
1815 control = &bt_cb(skb)->control;
1816
1817 control->reqseq = 0;
1818 control->txseq = chan->next_tx_seq;
1819
1820 __pack_control(chan, control, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001821
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001822 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau37339372012-05-17 20:53:33 -07001823 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1824 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001825 }
1826
Gustavo F. Padovan43434782011-04-12 18:31:57 -03001827 l2cap_do_send(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001828
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001829 BT_DBG("Sent txseq %u", control->txseq);
Mat Martineau37339372012-05-17 20:53:33 -07001830
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001831 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau37339372012-05-17 20:53:33 -07001832 chan->frames_sent++;
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001833 }
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001834}
1835
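/* Send queued I-frames from tx_send_head while the remote TX window has
 * room: stamp reqseq/txseq, add the FCS, transmit a clone, keep the
 * original for possible retransmission and start the retransmission
 * timer. Returns the number of frames sent.
 */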
Szymon Janc67c9e842011-07-28 16:24:33 +02001836static int l2cap_ertm_send(struct l2cap_chan *chan)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001837{
1838 struct sk_buff *skb, *tx_skb;
Mat Martineau18a48e72012-05-17 20:53:34 -07001839 struct l2cap_ctrl *control;
1840 int sent = 0;
1841
1842 BT_DBG("chan %p", chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001843
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03001844 if (chan->state != BT_CONNECTED)
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001845 return -ENOTCONN;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001846
Mat Martineau94122bb2012-05-02 09:42:02 -07001847 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1848 return 0;
1849
Mat Martineaub99e13a2012-10-23 15:24:19 -07001850 if (__chan_is_moving(chan))
1851 return 0;
1852
Mat Martineau18a48e72012-05-17 20:53:34 -07001853 while (chan->tx_send_head &&
1854 chan->unacked_frames < chan->remote_tx_win &&
1855 chan->tx_state == L2CAP_TX_STATE_XMIT) {
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001856
Mat Martineau18a48e72012-05-17 20:53:34 -07001857 skb = chan->tx_send_head;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001858
Mat Martineau18a48e72012-05-17 20:53:34 -07001859 bt_cb(skb)->control.retries = 1;
1860 control = &bt_cb(skb)->control;
Gustavo F. Padovan95ffa972010-06-18 20:37:33 -03001861
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001862 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
Mat Martineau18a48e72012-05-17 20:53:34 -07001863 control->final = 1;
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03001864
Mat Martineau18a48e72012-05-17 20:53:34 -07001865 control->reqseq = chan->buffer_seq;
1866 chan->last_acked_seq = chan->buffer_seq;
1867 control->txseq = chan->next_tx_seq;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001868
Mat Martineau18a48e72012-05-17 20:53:34 -07001869 __pack_control(chan, control, skb);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001870
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03001871 if (chan->fcs == L2CAP_FCS_CRC16) {
Mat Martineau18a48e72012-05-17 20:53:34 -07001872 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1873 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001874 }
1875
Mat Martineau18a48e72012-05-17 20:53:34 -07001876 /* Clone after data has been modified. Data is assumed to be
1877 read-only (for locking purposes) on cloned sk_buffs.
1878 */
1879 tx_skb = skb_clone(skb, GFP_KERNEL);
1880
1881 if (!tx_skb)
1882 break;
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -03001883
Gustavo F. Padovan1a09bcb2011-05-17 15:13:19 -03001884 __set_retrans_timer(chan);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001885
Andrei Emeltchenko836be932011-10-17 12:19:57 +03001886 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
Mat Martineau18a48e72012-05-17 20:53:34 -07001887 chan->unacked_frames++;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03001888 chan->frames_sent++;
Mat Martineau18a48e72012-05-17 20:53:34 -07001889 sent++;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001890
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001891 if (skb_queue_is_last(&chan->tx_q, skb))
1892 chan->tx_send_head = NULL;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001893 else
Gustavo F. Padovan58d35f82011-04-04 16:16:44 -03001894 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
Mat Martineau18a48e72012-05-17 20:53:34 -07001895
1896 l2cap_do_send(chan, tx_skb);
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001897 BT_DBG("Sent txseq %u", control->txseq);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001898 }
1899
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03001900 BT_DBG("Sent %d, %u unacked, %u in ERTM queue", sent,
1901 chan->unacked_frames, skb_queue_len(&chan->tx_q));
Mat Martineau18a48e72012-05-17 20:53:34 -07001902
1903 return sent;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001904}
1905
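/* Retransmit every frame queued on retrans_list: bump the retry count
 * (disconnecting once max_tx is exceeded), rewrite the control field and
 * FCS on a fresh copy and send it.
 */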
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001906static void l2cap_ertm_resend(struct l2cap_chan *chan)
1907{
1908 struct l2cap_ctrl control;
1909 struct sk_buff *skb;
1910 struct sk_buff *tx_skb;
1911 u16 seq;
1912
1913 BT_DBG("chan %p", chan);
1914
1915 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
1916 return;
1917
Mat Martineaub99e13a2012-10-23 15:24:19 -07001918 if (__chan_is_moving(chan))
1919 return;
1920
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001921 while (chan->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
1922 seq = l2cap_seq_list_pop(&chan->retrans_list);
1923
1924 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, seq);
1925 if (!skb) {
1926 BT_DBG("Error: Can't retransmit seq %d, frame missing",
Gustavo Padovan2d792812012-10-06 10:07:01 +01001927 seq);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001928 continue;
1929 }
1930
1931 bt_cb(skb)->control.retries++;
1932 control = bt_cb(skb)->control;
1933
1934 if (chan->max_tx != 0 &&
1935 bt_cb(skb)->control.retries > chan->max_tx) {
1936 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02001937 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001938 l2cap_seq_list_clear(&chan->retrans_list);
1939 break;
1940 }
1941
1942 control.reqseq = chan->buffer_seq;
1943 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1944 control.final = 1;
1945 else
1946 control.final = 0;
1947
1948 if (skb_cloned(skb)) {
1949 /* Cloned sk_buffs are read-only, so we need a
1950 * writeable copy
1951 */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03001952 tx_skb = skb_copy(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001953 } else {
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03001954 tx_skb = skb_clone(skb, GFP_KERNEL);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001955 }
1956
1957 if (!tx_skb) {
1958 l2cap_seq_list_clear(&chan->retrans_list);
1959 break;
1960 }
1961
1962 /* Update skb contents */
1963 if (test_bit(FLAG_EXT_CTRL, &chan->flags)) {
1964 put_unaligned_le32(__pack_extended_control(&control),
1965 tx_skb->data + L2CAP_HDR_SIZE);
1966 } else {
1967 put_unaligned_le16(__pack_enhanced_control(&control),
1968 tx_skb->data + L2CAP_HDR_SIZE);
1969 }
1970
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02001971 /* Update FCS */
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001972 if (chan->fcs == L2CAP_FCS_CRC16) {
Lukasz Rymanowski13cac152014-08-14 09:35:34 +02001973 u16 fcs = crc16(0, (u8 *) tx_skb->data,
1974 tx_skb->len - L2CAP_FCS_SIZE);
1975 put_unaligned_le16(fcs, skb_tail_pointer(tx_skb) -
1976 L2CAP_FCS_SIZE);
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001977 }
1978
1979 l2cap_do_send(chan, tx_skb);
1980
1981 BT_DBG("Resent txseq %d", control.txseq);
1982
1983 chan->last_acked_seq = chan->buffer_seq;
1984 }
1985}
1986
Mat Martineauf80842a2012-05-17 20:53:46 -07001987static void l2cap_retransmit(struct l2cap_chan *chan,
1988 struct l2cap_ctrl *control)
1989{
1990 BT_DBG("chan %p, control %p", chan, control);
1991
1992 l2cap_seq_list_append(&chan->retrans_list, control->reqseq);
1993 l2cap_ertm_resend(chan);
1994}
1995
Mat Martineaud2a7ac52012-05-17 20:53:42 -07001996static void l2cap_retransmit_all(struct l2cap_chan *chan,
1997 struct l2cap_ctrl *control)
1998{
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07001999 struct sk_buff *skb;
2000
2001 BT_DBG("chan %p, control %p", chan, control);
2002
2003 if (control->poll)
2004 set_bit(CONN_SEND_FBIT, &chan->conn_state);
2005
2006 l2cap_seq_list_clear(&chan->retrans_list);
2007
2008 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
2009 return;
2010
2011 if (chan->unacked_frames) {
2012 skb_queue_walk(&chan->tx_q, skb) {
2013 if (bt_cb(skb)->control.txseq == control->reqseq ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01002014 skb == chan->tx_send_head)
Mat Martineaue1fbd4c2012-05-17 20:53:43 -07002015 break;
2016 }
2017
2018 skb_queue_walk_from(&chan->tx_q, skb) {
2019 if (skb == chan->tx_send_head)
2020 break;
2021
2022 l2cap_seq_list_append(&chan->retrans_list,
2023 bt_cb(skb)->control.txseq);
2024 }
2025
2026 l2cap_ertm_resend(chan);
2027 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002028}
2029
Szymon Jancb17e73b2012-01-11 10:59:47 +01002030static void l2cap_send_ack(struct l2cap_chan *chan)
2031{
Mat Martineau0a0aba42012-05-17 20:53:39 -07002032 struct l2cap_ctrl control;
2033 u16 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
2034 chan->last_acked_seq);
2035 int threshold;
2036
2037 BT_DBG("chan %p last_acked_seq %d buffer_seq %d",
2038 chan, chan->last_acked_seq, chan->buffer_seq);
2039
2040 memset(&control, 0, sizeof(control));
2041 control.sframe = 1;
2042
2043 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
2044 chan->rx_state == L2CAP_RX_STATE_RECV) {
2045 __clear_ack_timer(chan);
2046 control.super = L2CAP_SUPER_RNR;
2047 control.reqseq = chan->buffer_seq;
2048 l2cap_send_sframe(chan, &control);
2049 } else {
2050 if (!test_bit(CONN_REMOTE_BUSY, &chan->conn_state)) {
2051 l2cap_ertm_send(chan);
2052 /* If any i-frames were sent, they included an ack */
2053 if (chan->buffer_seq == chan->last_acked_seq)
2054 frames_to_ack = 0;
2055 }
2056
Mat Martineauc20f8e32012-07-10 05:47:07 -07002057 /* Ack now if the window is 3/4ths full.
Mat Martineau0a0aba42012-05-17 20:53:39 -07002058 * Calculate without mul or div
2059 */
Mat Martineauc20f8e32012-07-10 05:47:07 -07002060 threshold = chan->ack_win;
Mat Martineau0a0aba42012-05-17 20:53:39 -07002061 threshold += threshold << 1;
2062 threshold >>= 2;
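		/* e.g. with ack_win == 63: 63 + 126 = 189 and 189 >> 2 = 47,
		 * i.e. roughly (3 * 63) / 4
		 */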
2063
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002064 BT_DBG("frames_to_ack %u, threshold %d", frames_to_ack,
Mat Martineau0a0aba42012-05-17 20:53:39 -07002065 threshold);
2066
2067 if (frames_to_ack >= threshold) {
2068 __clear_ack_timer(chan);
2069 control.super = L2CAP_SUPER_RR;
2070 control.reqseq = chan->buffer_seq;
2071 l2cap_send_sframe(chan, &control);
2072 frames_to_ack = 0;
2073 }
2074
2075 if (frames_to_ack)
2076 __set_ack_timer(chan);
2077 }
Szymon Jancb17e73b2012-01-11 10:59:47 +01002078}
2079
Gustavo F. Padovan04124682012-03-08 01:25:00 -03002080static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan,
2081 struct msghdr *msg, int len,
2082 int count, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002083{
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002084 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002085 struct sk_buff **frag;
Gustavo Padovan90338942012-04-06 20:15:47 -03002086 int sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002087
Jukka Rissanen04988782014-06-18 16:37:07 +03002088 if (chan->ops->memcpy_fromiovec(chan, skb_put(skb, count),
2089 msg->msg_iov, count))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002090 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002091
2092 sent += count;
2093 len -= count;
2094
2095 /* Continuation fragments (no L2CAP header) */
2096 frag = &skb_shinfo(skb)->frag_list;
2097 while (len) {
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002098 struct sk_buff *tmp;
2099
Linus Torvalds1da177e2005-04-16 15:20:36 -07002100 count = min_t(unsigned int, conn->mtu, len);
2101
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002102 tmp = chan->ops->alloc_skb(chan, 0, count,
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002103 msg->msg_flags & MSG_DONTWAIT);
2104 if (IS_ERR(tmp))
2105 return PTR_ERR(tmp);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002106
Gustavo Padovanfbe00702012-05-15 13:22:55 -03002107 *frag = tmp;
2108
Jukka Rissanen04988782014-06-18 16:37:07 +03002109 if (chan->ops->memcpy_fromiovec(chan, skb_put(*frag, count),
2110 msg->msg_iov, count))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002111 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002112
2113 sent += count;
2114 len -= count;
2115
Gustavo Padovan2d0ed3d2012-05-11 13:16:12 -03002116 skb->len += (*frag)->len;
2117 skb->data_len += (*frag)->len;
2118
Linus Torvalds1da177e2005-04-16 15:20:36 -07002119 frag = &(*frag)->next;
2120 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002121
2122 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002123}
Linus Torvalds1da177e2005-04-16 15:20:36 -07002124
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002125static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002126 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002127{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002128 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002129 struct sk_buff *skb;
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002130 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002131 struct l2cap_hdr *lh;
2132
Marcel Holtmann8d463212014-06-05 15:22:51 +02002133 BT_DBG("chan %p psm 0x%2.2x len %zu", chan,
2134 __le16_to_cpu(chan->psm), len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002135
2136 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002137
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002138 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002139 msg->msg_flags & MSG_DONTWAIT);
2140 if (IS_ERR(skb))
2141 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002142
2143 /* Create L2CAP header */
2144 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002145 lh->cid = cpu_to_le16(chan->dcid);
Andrei Emeltchenkodaf6a78c2012-05-03 10:55:52 +03002146 lh->len = cpu_to_le16(len + L2CAP_PSMLEN_SIZE);
Marcel Holtmann43b1b8d2013-10-12 06:01:26 -07002147 put_unaligned(chan->psm, (__le16 *) skb_put(skb, L2CAP_PSMLEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002148
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002149 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002150 if (unlikely(err < 0)) {
2151 kfree_skb(skb);
2152 return ERR_PTR(err);
2153 }
2154 return skb;
2155}
2156
Luiz Augusto von Dentz5e59b792011-11-01 10:58:57 +02002157static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
Marcel Holtmann8d463212014-06-05 15:22:51 +02002158 struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002159{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002160 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002161 struct sk_buff *skb;
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002162 int err, count;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002163 struct l2cap_hdr *lh;
2164
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002165 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002166
Gustavo Padovanf2ba7fa2012-05-03 04:54:21 -03002167 count = min_t(unsigned int, (conn->mtu - L2CAP_HDR_SIZE), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002168
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002169 skb = chan->ops->alloc_skb(chan, L2CAP_HDR_SIZE, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002170 msg->msg_flags & MSG_DONTWAIT);
2171 if (IS_ERR(skb))
2172 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002173
2174 /* Create L2CAP header */
2175 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002176 lh->cid = cpu_to_le16(chan->dcid);
Gustavo Padovan6ff9b5e2012-05-02 11:56:17 -03002177 lh->len = cpu_to_le16(len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002178
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002179 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002180 if (unlikely(err < 0)) {
2181 kfree_skb(skb);
2182 return ERR_PTR(err);
2183 }
2184 return skb;
2185}
2186
Luiz Augusto von Dentzab0ff762011-09-12 20:00:50 +03002187static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002188 struct msghdr *msg, size_t len,
2189 u16 sdulen)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002190{
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03002191 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002192 struct sk_buff *skb;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002193 int err, count, hlen;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002194 struct l2cap_hdr *lh;
2195
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002196 BT_DBG("chan %p len %zu", chan, len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002197
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03002198 if (!conn)
2199 return ERR_PTR(-ENOTCONN);
2200
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002201 hlen = __ertm_hdr_size(chan);
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03002202
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002203 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002204 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002205
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03002206 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002207 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03002208
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002209 count = min_t(unsigned int, (conn->mtu - hlen), len);
Andrei Emeltchenko2f7719c2012-01-20 14:08:03 +02002210
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002211 skb = chan->ops->alloc_skb(chan, hlen, count,
Gustavo Padovan90338942012-04-06 20:15:47 -03002212 msg->msg_flags & MSG_DONTWAIT);
2213 if (IS_ERR(skb))
2214 return skb;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002215
2216 /* Create L2CAP header */
2217 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03002218 lh->cid = cpu_to_le16(chan->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002219 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002220
Mat Martineau18a48e72012-05-17 20:53:34 -07002221 /* Control header is populated later */
2222 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2223 put_unaligned_le32(0, skb_put(skb, L2CAP_EXT_CTRL_SIZE));
2224 else
2225 put_unaligned_le16(0, skb_put(skb, L2CAP_ENH_CTRL_SIZE));
Andrei Emeltchenko88843ab2011-10-17 12:19:56 +03002226
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002227 if (sdulen)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03002228 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002229
Andrei Emeltchenko0952a572012-01-13 17:21:43 +02002230 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002231 if (unlikely(err < 0)) {
2232 kfree_skb(skb);
2233 return ERR_PTR(err);
2234 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03002235
Mat Martineau18a48e72012-05-17 20:53:34 -07002236 bt_cb(skb)->control.fcs = chan->fcs;
Mat Martineau3ce35142012-04-25 16:36:14 -07002237 bt_cb(skb)->control.retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03002238 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002239}
2240
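/* Split an outgoing SDU into ERTM/streaming PDUs: the PDU size is derived
 * from the HCI MTU (capped to L2CAP_BREDR_MAX_PAYLOAD without a high-speed
 * AMP link and to the remote MPS), and each fragment is tagged with the
 * matching SAR value (unsegmented, start, continue, end).
 */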
Mat Martineau94122bb2012-05-02 09:42:02 -07002241static int l2cap_segment_sdu(struct l2cap_chan *chan,
2242 struct sk_buff_head *seg_queue,
2243 struct msghdr *msg, size_t len)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002244{
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002245 struct sk_buff *skb;
Mat Martineau94122bb2012-05-02 09:42:02 -07002246 u16 sdu_len;
2247 size_t pdu_len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002248 u8 sar;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002249
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002250 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002251
Mat Martineau94122bb2012-05-02 09:42:02 -07002252 /* It is critical that ERTM PDUs fit in a single HCI fragment,
2253 * so fragmented skbs are not used. The HCI layer's handling
2254 * of fragmented skbs is not compatible with ERTM's queueing.
2255 */
2256
2257 /* PDU size is derived from the HCI MTU */
2258 pdu_len = chan->conn->mtu;
2259
Mat Martineaua5495742012-10-23 15:24:21 -07002260 /* Constrain PDU size for BR/EDR connections */
2261 if (!chan->hs_hcon)
2262 pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);
Mat Martineau94122bb2012-05-02 09:42:02 -07002263
2264 /* Adjust for largest possible L2CAP overhead. */
Gustavo Padovan35d401d2012-05-25 18:57:05 -03002265 if (chan->fcs)
2266 pdu_len -= L2CAP_FCS_SIZE;
2267
Gustavo Padovanba7aa642012-05-29 13:29:16 -03002268 pdu_len -= __ertm_hdr_size(chan);
Mat Martineau94122bb2012-05-02 09:42:02 -07002269
2270 /* Remote device may have requested smaller PDUs */
2271 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2272
2273 if (len <= pdu_len) {
2274 sar = L2CAP_SAR_UNSEGMENTED;
2275 sdu_len = 0;
2276 pdu_len = len;
2277 } else {
2278 sar = L2CAP_SAR_START;
2279 sdu_len = len;
Mat Martineau94122bb2012-05-02 09:42:02 -07002280 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002281
2282 while (len > 0) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002283 skb = l2cap_create_iframe_pdu(chan, msg, pdu_len, sdu_len);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002284
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002285 if (IS_ERR(skb)) {
Mat Martineau94122bb2012-05-02 09:42:02 -07002286 __skb_queue_purge(seg_queue);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002287 return PTR_ERR(skb);
2288 }
2289
Mat Martineau94122bb2012-05-02 09:42:02 -07002290 bt_cb(skb)->control.sar = sar;
2291 __skb_queue_tail(seg_queue, skb);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002292
Mat Martineau94122bb2012-05-02 09:42:02 -07002293 len -= pdu_len;
Lukasz Rymanowski069cb272014-08-13 16:01:41 +02002294 if (sdu_len)
Mat Martineau94122bb2012-05-02 09:42:02 -07002295 sdu_len = 0;
Mat Martineau94122bb2012-05-02 09:42:02 -07002296
2297 if (len <= pdu_len) {
2298 sar = L2CAP_SAR_END;
2299 pdu_len = len;
2300 } else {
2301 sar = L2CAP_SAR_CONTINUE;
2302 }
2303 }
2304
Gustavo Padovanf0f62792012-05-29 13:29:17 -03002305 return 0;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03002306}
2307
Johan Hedberg177f8f22013-05-31 17:54:51 +03002308static struct sk_buff *l2cap_create_le_flowctl_pdu(struct l2cap_chan *chan,
2309 struct msghdr *msg,
2310 size_t len, u16 sdulen)
2311{
2312 struct l2cap_conn *conn = chan->conn;
2313 struct sk_buff *skb;
2314 int err, count, hlen;
2315 struct l2cap_hdr *lh;
2316
2317 BT_DBG("chan %p len %zu", chan, len);
2318
2319 if (!conn)
2320 return ERR_PTR(-ENOTCONN);
2321
2322 hlen = L2CAP_HDR_SIZE;
2323
2324 if (sdulen)
2325 hlen += L2CAP_SDULEN_SIZE;
2326
2327 count = min_t(unsigned int, (conn->mtu - hlen), len);
2328
Marcel Holtmannd9fbd022014-06-08 11:22:28 +02002329 skb = chan->ops->alloc_skb(chan, hlen, count,
Johan Hedberg177f8f22013-05-31 17:54:51 +03002330 msg->msg_flags & MSG_DONTWAIT);
2331 if (IS_ERR(skb))
2332 return skb;
2333
2334 /* Create L2CAP header */
2335 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2336 lh->cid = cpu_to_le16(chan->dcid);
2337 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
2338
2339 if (sdulen)
2340 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
2341
2342 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
2343 if (unlikely(err < 0)) {
2344 kfree_skb(skb);
2345 return ERR_PTR(err);
2346 }
2347
2348 return skb;
2349}
2350
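/* Split an outgoing SDU into LE credit based flow control PDUs: only the
 * first PDU carries the 16-bit SDU length, and every PDU is limited by
 * the HCI MTU and the remote MPS.
 */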
2351static int l2cap_segment_le_sdu(struct l2cap_chan *chan,
2352 struct sk_buff_head *seg_queue,
2353 struct msghdr *msg, size_t len)
2354{
2355 struct sk_buff *skb;
2356 size_t pdu_len;
2357 u16 sdu_len;
2358
2359 BT_DBG("chan %p, msg %p, len %zu", chan, msg, len);
2360
2361 pdu_len = chan->conn->mtu - L2CAP_HDR_SIZE;
2362
2363 pdu_len = min_t(size_t, pdu_len, chan->remote_mps);
2364
2365 sdu_len = len;
2366 pdu_len -= L2CAP_SDULEN_SIZE;
2367
2368 while (len > 0) {
2369 if (len <= pdu_len)
2370 pdu_len = len;
2371
2372 skb = l2cap_create_le_flowctl_pdu(chan, msg, pdu_len, sdu_len);
2373 if (IS_ERR(skb)) {
2374 __skb_queue_purge(seg_queue);
2375 return PTR_ERR(skb);
2376 }
2377
2378 __skb_queue_tail(seg_queue, skb);
2379
2380 len -= pdu_len;
2381
2382 if (sdu_len) {
2383 sdu_len = 0;
2384 pdu_len += L2CAP_SDULEN_SIZE;
2385 }
2386 }
2387
2388 return 0;
2389}
2390
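/* Main transmit entry point: dispatch on channel type and mode
 * (connectionless PDUs, LE credit based flow control, basic mode, or
 * segmentation followed by the ERTM/streaming TX state machine).
 */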
Marcel Holtmann8d463212014-06-05 15:22:51 +02002391int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002392{
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002393 struct sk_buff *skb;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002394 int err;
Mat Martineau94122bb2012-05-02 09:42:02 -07002395 struct sk_buff_head seg_queue;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002396
Seung-Woo Kim31e8ce82013-11-05 18:46:33 +09002397 if (!chan->conn)
2398 return -ENOTCONN;
2399
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002400 /* Connectionless channel */
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002401 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
Marcel Holtmann8d463212014-06-05 15:22:51 +02002402 skb = l2cap_create_connless_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002403 if (IS_ERR(skb))
2404 return PTR_ERR(skb);
2405
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002406 /* Channel lock is released before requesting new skb and then
2407 * reacquired, thus we need to recheck the channel state.
2408 */
2409 if (chan->state != BT_CONNECTED) {
2410 kfree_skb(skb);
2411 return -ENOTCONN;
2412 }
2413
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002414 l2cap_do_send(chan, skb);
2415 return len;
2416 }
2417
2418 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03002419 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedberg177f8f22013-05-31 17:54:51 +03002420 /* Check outgoing MTU */
2421 if (len > chan->omtu)
2422 return -EMSGSIZE;
2423
Johan Hedbergfad5fc82013-12-05 09:45:01 +02002424 if (!chan->tx_credits)
2425 return -EAGAIN;
2426
Johan Hedberg177f8f22013-05-31 17:54:51 +03002427 __skb_queue_head_init(&seg_queue);
2428
2429 err = l2cap_segment_le_sdu(chan, &seg_queue, msg, len);
2430
2431 if (chan->state != BT_CONNECTED) {
2432 __skb_queue_purge(&seg_queue);
2433 err = -ENOTCONN;
2434 }
2435
2436 if (err)
2437 return err;
2438
2439 skb_queue_splice_tail_init(&seg_queue, &chan->tx_q);
2440
2441 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
2442 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
2443 chan->tx_credits--;
2444 }
2445
2446 if (!chan->tx_credits)
2447 chan->ops->suspend(chan);
2448
2449 err = len;
2450
2451 break;
2452
Johan Hedbergfad5fc82013-12-05 09:45:01 +02002453 case L2CAP_MODE_BASIC:
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002454 /* Check outgoing MTU */
2455 if (len > chan->omtu)
2456 return -EMSGSIZE;
2457
2458 /* Create a basic PDU */
Marcel Holtmann8d463212014-06-05 15:22:51 +02002459 skb = l2cap_create_basic_pdu(chan, msg, len);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002460 if (IS_ERR(skb))
2461 return PTR_ERR(skb);
2462
Andrzej Kaczmarekede81a22014-02-25 17:16:22 +01002463 /* Channel lock is released before requesting new skb and then
2464 * reacquired, thus we need to recheck the channel state.
2465 */
2466 if (chan->state != BT_CONNECTED) {
2467 kfree_skb(skb);
2468 return -ENOTCONN;
2469 }
2470
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002471 l2cap_do_send(chan, skb);
2472 err = len;
2473 break;
2474
2475 case L2CAP_MODE_ERTM:
2476 case L2CAP_MODE_STREAMING:
Mat Martineau94122bb2012-05-02 09:42:02 -07002477 /* Check outgoing MTU */
2478 if (len > chan->omtu) {
2479 err = -EMSGSIZE;
2480 break;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002481 }
2482
Mat Martineau94122bb2012-05-02 09:42:02 -07002483 __skb_queue_head_init(&seg_queue);
2484
2485 /* Do segmentation before calling in to the state machine,
2486 * since it's possible to block while waiting for memory
2487 * allocation.
2488 */
2489 err = l2cap_segment_sdu(chan, &seg_queue, msg, len);
2490
2491 /* The channel could have been closed while segmenting,
2492 * check that it is still connected.
2493 */
2494 if (chan->state != BT_CONNECTED) {
2495 __skb_queue_purge(&seg_queue);
2496 err = -ENOTCONN;
2497 }
2498
2499 if (err)
2500 break;
2501
Mat Martineau37339372012-05-17 20:53:33 -07002502 if (chan->mode == L2CAP_MODE_ERTM)
Gustavo Padovand6603662012-05-21 13:58:22 -03002503 l2cap_tx(chan, NULL, &seg_queue, L2CAP_EV_DATA_REQUEST);
Mat Martineau37339372012-05-17 20:53:33 -07002504 else
Gustavo Padovand6603662012-05-21 13:58:22 -03002505 l2cap_streaming_send(chan, &seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002506
Gustavo Padovand6603662012-05-21 13:58:22 -03002507 err = len;
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002508
Mat Martineau94122bb2012-05-02 09:42:02 -07002509 /* If the skbs were not queued for sending, they'll still be in
2510 * seg_queue and need to be purged.
2511 */
2512 __skb_queue_purge(&seg_queue);
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002513 break;
2514
2515 default:
2516		BT_DBG("bad mode %1.1x", chan->mode);
2517 err = -EBADFD;
2518 }
2519
2520 return err;
2521}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03002522EXPORT_SYMBOL_GPL(l2cap_chan_send);
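/* Illustrative sketch with a hypothetical caller: a socket or fixed-channel
 * backend built on this core would typically do
 *
 *	err = l2cap_chan_send(chan, &msg, len);
 *
 * and treat a non-negative return as the number of bytes accepted. In
 * L2CAP_MODE_LE_FLOWCTL every PDU handed to l2cap_do_send() consumes one
 * tx credit; once tx_credits reaches zero the remaining segments stay on
 * chan->tx_q and chan->ops->suspend() tells the owner to stop writing
 * until the peer returns credits.
 */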
Gustavo F. Padovan9a91a042011-04-28 18:50:17 -03002523
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002524static void l2cap_send_srej(struct l2cap_chan *chan, u16 txseq)
2525{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002526 struct l2cap_ctrl control;
2527 u16 seq;
2528
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002529 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002530
2531 memset(&control, 0, sizeof(control));
2532 control.sframe = 1;
2533 control.super = L2CAP_SUPER_SREJ;
2534
2535 for (seq = chan->expected_tx_seq; seq != txseq;
2536 seq = __next_seq(chan, seq)) {
2537 if (!l2cap_ertm_seq_in_queue(&chan->srej_q, seq)) {
2538 control.reqseq = seq;
2539 l2cap_send_sframe(chan, &control);
2540 l2cap_seq_list_append(&chan->srej_list, seq);
2541 }
2542 }
2543
2544 chan->expected_tx_seq = __next_seq(chan, txseq);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002545}
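/* Worked example with illustrative values: if expected_tx_seq is 3 and an
 * I-frame arrives with txseq 6, the loop above sends SREJ S-frames with
 * reqseq 3, 4 and 5 (skipping any sequence number already buffered in
 * srej_q), records them in srej_list, and advances expected_tx_seq to 7 so
 * reception continues after the out-of-sequence frame.
 */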
2546
2547static void l2cap_send_srej_tail(struct l2cap_chan *chan)
2548{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002549 struct l2cap_ctrl control;
2550
2551 BT_DBG("chan %p", chan);
2552
2553 if (chan->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2554 return;
2555
2556 memset(&control, 0, sizeof(control));
2557 control.sframe = 1;
2558 control.super = L2CAP_SUPER_SREJ;
2559 control.reqseq = chan->srej_list.tail;
2560 l2cap_send_sframe(chan, &control);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002561}
2562
2563static void l2cap_send_srej_list(struct l2cap_chan *chan, u16 txseq)
2564{
Mat Martineaubed68bd2012-05-17 20:53:44 -07002565 struct l2cap_ctrl control;
2566 u16 initial_head;
2567 u16 seq;
2568
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002569 BT_DBG("chan %p, txseq %u", chan, txseq);
Mat Martineaubed68bd2012-05-17 20:53:44 -07002570
2571 memset(&control, 0, sizeof(control));
2572 control.sframe = 1;
2573 control.super = L2CAP_SUPER_SREJ;
2574
2575 /* Capture initial list head to allow only one pass through the list. */
2576 initial_head = chan->srej_list.head;
2577
2578 do {
2579 seq = l2cap_seq_list_pop(&chan->srej_list);
2580 if (seq == txseq || seq == L2CAP_SEQ_LIST_CLEAR)
2581 break;
2582
2583 control.reqseq = seq;
2584 l2cap_send_sframe(chan, &control);
2585 l2cap_seq_list_append(&chan->srej_list, seq);
2586 } while (chan->srej_list.head != initial_head);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07002587}
2588
Mat Martineau608bcc62012-05-17 20:53:32 -07002589static void l2cap_process_reqseq(struct l2cap_chan *chan, u16 reqseq)
2590{
2591 struct sk_buff *acked_skb;
2592 u16 ackseq;
2593
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002594 BT_DBG("chan %p, reqseq %u", chan, reqseq);
Mat Martineau608bcc62012-05-17 20:53:32 -07002595
2596 if (chan->unacked_frames == 0 || reqseq == chan->expected_ack_seq)
2597 return;
2598
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002599 BT_DBG("expected_ack_seq %u, unacked_frames %u",
Mat Martineau608bcc62012-05-17 20:53:32 -07002600 chan->expected_ack_seq, chan->unacked_frames);
2601
2602 for (ackseq = chan->expected_ack_seq; ackseq != reqseq;
2603 ackseq = __next_seq(chan, ackseq)) {
2604
2605 acked_skb = l2cap_ertm_seq_in_queue(&chan->tx_q, ackseq);
2606 if (acked_skb) {
2607 skb_unlink(acked_skb, &chan->tx_q);
2608 kfree_skb(acked_skb);
2609 chan->unacked_frames--;
2610 }
2611 }
2612
2613 chan->expected_ack_seq = reqseq;
2614
2615 if (chan->unacked_frames == 0)
2616 __clear_retrans_timer(chan);
2617
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002618 BT_DBG("unacked_frames %u", chan->unacked_frames);
Mat Martineau608bcc62012-05-17 20:53:32 -07002619}
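/* Worked example with illustrative values: with expected_ack_seq == 2,
 * unacked_frames == 3 and an incoming reqseq of 5, frames 2, 3 and 4 are
 * unlinked from tx_q and freed (assuming they are still queued),
 * unacked_frames drops to 0, the retransmission timer is cleared and
 * expected_ack_seq becomes 5.
 */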
2620
2621static void l2cap_abort_rx_srej_sent(struct l2cap_chan *chan)
2622{
2623 BT_DBG("chan %p", chan);
2624
2625 chan->expected_tx_seq = chan->buffer_seq;
2626 l2cap_seq_list_clear(&chan->srej_list);
2627 skb_queue_purge(&chan->srej_q);
2628 chan->rx_state = L2CAP_RX_STATE_RECV;
2629}
2630
Gustavo Padovand6603662012-05-21 13:58:22 -03002631static void l2cap_tx_state_xmit(struct l2cap_chan *chan,
2632 struct l2cap_ctrl *control,
2633 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002634{
Mat Martineau608bcc62012-05-17 20:53:32 -07002635 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2636 event);
2637
2638 switch (event) {
2639 case L2CAP_EV_DATA_REQUEST:
2640 if (chan->tx_send_head == NULL)
2641 chan->tx_send_head = skb_peek(skbs);
2642
2643 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2644 l2cap_ertm_send(chan);
2645 break;
2646 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2647 BT_DBG("Enter LOCAL_BUSY");
2648 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2649
2650 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2651 /* The SREJ_SENT state must be aborted if we are to
2652 * enter the LOCAL_BUSY state.
2653 */
2654 l2cap_abort_rx_srej_sent(chan);
2655 }
2656
2657 l2cap_send_ack(chan);
2658
2659 break;
2660 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2661 BT_DBG("Exit LOCAL_BUSY");
2662 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2663
2664 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2665 struct l2cap_ctrl local_control;
2666
2667 memset(&local_control, 0, sizeof(local_control));
2668 local_control.sframe = 1;
2669 local_control.super = L2CAP_SUPER_RR;
2670 local_control.poll = 1;
2671 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002672 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002673
2674 chan->retry_count = 1;
2675 __set_monitor_timer(chan);
2676 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2677 }
2678 break;
2679 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2680 l2cap_process_reqseq(chan, control->reqseq);
2681 break;
2682 case L2CAP_EV_EXPLICIT_POLL:
2683 l2cap_send_rr_or_rnr(chan, 1);
2684 chan->retry_count = 1;
2685 __set_monitor_timer(chan);
2686 __clear_ack_timer(chan);
2687 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2688 break;
2689 case L2CAP_EV_RETRANS_TO:
2690 l2cap_send_rr_or_rnr(chan, 1);
2691 chan->retry_count = 1;
2692 __set_monitor_timer(chan);
2693 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2694 break;
2695 case L2CAP_EV_RECV_FBIT:
2696 /* Nothing to process */
2697 break;
2698 default:
2699 break;
2700 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002701}
2702
Gustavo Padovand6603662012-05-21 13:58:22 -03002703static void l2cap_tx_state_wait_f(struct l2cap_chan *chan,
2704 struct l2cap_ctrl *control,
2705 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002706{
Mat Martineau608bcc62012-05-17 20:53:32 -07002707 BT_DBG("chan %p, control %p, skbs %p, event %d", chan, control, skbs,
2708 event);
2709
2710 switch (event) {
2711 case L2CAP_EV_DATA_REQUEST:
2712 if (chan->tx_send_head == NULL)
2713 chan->tx_send_head = skb_peek(skbs);
2714 /* Queue data, but don't send. */
2715 skb_queue_splice_tail_init(skbs, &chan->tx_q);
2716 break;
2717 case L2CAP_EV_LOCAL_BUSY_DETECTED:
2718 BT_DBG("Enter LOCAL_BUSY");
2719 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2720
2721 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
2722 /* The SREJ_SENT state must be aborted if we are to
2723 * enter the LOCAL_BUSY state.
2724 */
2725 l2cap_abort_rx_srej_sent(chan);
2726 }
2727
2728 l2cap_send_ack(chan);
2729
2730 break;
2731 case L2CAP_EV_LOCAL_BUSY_CLEAR:
2732 BT_DBG("Exit LOCAL_BUSY");
2733 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
2734
2735 if (test_bit(CONN_RNR_SENT, &chan->conn_state)) {
2736 struct l2cap_ctrl local_control;
2737 memset(&local_control, 0, sizeof(local_control));
2738 local_control.sframe = 1;
2739 local_control.super = L2CAP_SUPER_RR;
2740 local_control.poll = 1;
2741 local_control.reqseq = chan->buffer_seq;
Mat Martineaua67d7f62012-05-17 20:53:35 -07002742 l2cap_send_sframe(chan, &local_control);
Mat Martineau608bcc62012-05-17 20:53:32 -07002743
2744 chan->retry_count = 1;
2745 __set_monitor_timer(chan);
2746 chan->tx_state = L2CAP_TX_STATE_WAIT_F;
2747 }
2748 break;
2749 case L2CAP_EV_RECV_REQSEQ_AND_FBIT:
2750 l2cap_process_reqseq(chan, control->reqseq);
2751
2752 /* Fall through */
2753
2754 case L2CAP_EV_RECV_FBIT:
2755 if (control && control->final) {
2756 __clear_monitor_timer(chan);
2757 if (chan->unacked_frames > 0)
2758 __set_retrans_timer(chan);
2759 chan->retry_count = 0;
2760 chan->tx_state = L2CAP_TX_STATE_XMIT;
2761			BT_DBG("recv fbit tx_state 0x%2.2x", chan->tx_state);
2762 }
2763 break;
2764 case L2CAP_EV_EXPLICIT_POLL:
2765 /* Ignore */
2766 break;
2767 case L2CAP_EV_MONITOR_TO:
2768 if (chan->max_tx == 0 || chan->retry_count < chan->max_tx) {
2769 l2cap_send_rr_or_rnr(chan, 1);
2770 __set_monitor_timer(chan);
2771 chan->retry_count++;
2772 } else {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02002773 l2cap_send_disconn_req(chan, ECONNABORTED);
Mat Martineau608bcc62012-05-17 20:53:32 -07002774 }
2775 break;
2776 default:
2777 break;
2778 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002779}
2780
Gustavo Padovand6603662012-05-21 13:58:22 -03002781static void l2cap_tx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
2782 struct sk_buff_head *skbs, u8 event)
Mat Martineau608bcc62012-05-17 20:53:32 -07002783{
Mat Martineau608bcc62012-05-17 20:53:32 -07002784 BT_DBG("chan %p, control %p, skbs %p, event %d, state %d",
2785 chan, control, skbs, event, chan->tx_state);
2786
2787 switch (chan->tx_state) {
2788 case L2CAP_TX_STATE_XMIT:
Gustavo Padovand6603662012-05-21 13:58:22 -03002789 l2cap_tx_state_xmit(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002790 break;
2791 case L2CAP_TX_STATE_WAIT_F:
Gustavo Padovand6603662012-05-21 13:58:22 -03002792 l2cap_tx_state_wait_f(chan, control, skbs, event);
Mat Martineau608bcc62012-05-17 20:53:32 -07002793 break;
2794 default:
2795 /* Ignore event */
2796 break;
2797 }
Mat Martineau608bcc62012-05-17 20:53:32 -07002798}
2799
Mat Martineau4b51dae92012-05-17 20:53:37 -07002800static void l2cap_pass_to_tx(struct l2cap_chan *chan,
2801 struct l2cap_ctrl *control)
2802{
2803 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002804 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_REQSEQ_AND_FBIT);
Mat Martineau4b51dae92012-05-17 20:53:37 -07002805}
2806
Mat Martineauf80842a2012-05-17 20:53:46 -07002807static void l2cap_pass_to_tx_fbit(struct l2cap_chan *chan,
2808 struct l2cap_ctrl *control)
2809{
2810 BT_DBG("chan %p, control %p", chan, control);
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03002811 l2cap_tx(chan, control, NULL, L2CAP_EV_RECV_FBIT);
Mat Martineauf80842a2012-05-17 20:53:46 -07002812}
2813
Linus Torvalds1da177e2005-04-16 15:20:36 -07002814/* Copy frame to all raw sockets on that connection */
2815static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2816{
Linus Torvalds1da177e2005-04-16 15:20:36 -07002817 struct sk_buff *nskb;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03002818 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819
2820 BT_DBG("conn %p", conn);
2821
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002822 mutex_lock(&conn->chan_lock);
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002823
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002824 list_for_each_entry(chan, &conn->chan_l, list) {
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03002825 if (chan->chan_type != L2CAP_CHAN_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002826 continue;
2827
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002828 /* Don't send frame to the channel it came from */
2829 if (bt_cb(skb)->chan == chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002830 continue;
Gustavo Padovan7f5396a2013-10-21 18:22:25 -02002831
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002832 nskb = skb_clone(skb, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03002833 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002834 continue;
Gustavo Padovan80b98022012-05-27 22:27:51 -03002835 if (chan->ops->recv(chan, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002836 kfree_skb(nskb);
2837 }
Gustavo F. Padovan3d57dc62011-12-17 10:56:45 -02002838
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02002839 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002840}
2841
2842/* ---- L2CAP signalling commands ---- */
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002843static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, u8 code,
2844 u8 ident, u16 dlen, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002845{
2846 struct sk_buff *skb, **frag;
2847 struct l2cap_cmd_hdr *cmd;
2848 struct l2cap_hdr *lh;
2849 int len, count;
2850
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002851 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %u",
2852 conn, code, ident, dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002853
Anderson Lizardo300b9622013-06-02 16:30:40 -04002854 if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE)
2855 return NULL;
2856
Linus Torvalds1da177e2005-04-16 15:20:36 -07002857 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2858 count = min_t(unsigned int, conn->mtu, len);
2859
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002860 skb = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002861 if (!skb)
2862 return NULL;
2863
2864 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002865 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002866
2867 if (conn->hcon->type == LE_LINK)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002868 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02002869 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002870 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002871
2872 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2873 cmd->code = code;
2874 cmd->ident = ident;
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07002875 cmd->len = cpu_to_le16(dlen);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002876
2877 if (dlen) {
2878 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2879 memcpy(skb_put(skb, count), data, count);
2880 data += count;
2881 }
2882
2883 len -= skb->len;
2884
2885 /* Continuation fragments (no L2CAP header) */
2886 frag = &skb_shinfo(skb)->frag_list;
2887 while (len) {
2888 count = min_t(unsigned int, conn->mtu, len);
2889
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03002890 *frag = bt_skb_alloc(count, GFP_KERNEL);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002891 if (!*frag)
2892 goto fail;
2893
2894 memcpy(skb_put(*frag, count), data, count);
2895
2896 len -= count;
2897 data += count;
2898
2899 frag = &(*frag)->next;
2900 }
2901
2902 return skb;
2903
2904fail:
2905 kfree_skb(skb);
2906 return NULL;
2907}
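/* Illustrative layout with hypothetical values: on a BR/EDR link, building
 * a command with code L2CAP_CONN_REQ, ident 0x01 and a 4 byte payload
 * produces an skb that starts with an L2CAP header of len 8 (that is,
 * L2CAP_CMD_HDR_SIZE + dlen) on CID 0x0001, followed by the command header
 * with len 4 and then the payload; anything that does not fit in conn->mtu
 * is carried in continuation fragments chained on frag_list.
 */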
2908
Gustavo Padovan2d792812012-10-06 10:07:01 +01002909static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen,
2910 unsigned long *val)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002911{
2912 struct l2cap_conf_opt *opt = *ptr;
2913 int len;
2914
2915 len = L2CAP_CONF_OPT_SIZE + opt->len;
2916 *ptr += len;
2917
2918 *type = opt->type;
2919 *olen = opt->len;
2920
2921 switch (opt->len) {
2922 case 1:
2923 *val = *((u8 *) opt->val);
2924 break;
2925
2926 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002927 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002928 break;
2929
2930 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002931 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002932 break;
2933
2934 default:
2935 *val = (unsigned long) opt->val;
2936 break;
2937 }
2938
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002939 BT_DBG("type 0x%2.2x len %u val 0x%lx", *type, opt->len, *val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002940 return len;
2941}
2942
Linus Torvalds1da177e2005-04-16 15:20:36 -07002943static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2944{
2945 struct l2cap_conf_opt *opt = *ptr;
2946
Andrei Emeltchenkob4400672012-07-10 15:27:49 +03002947 BT_DBG("type 0x%2.2x len %u val 0x%lx", type, len, val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002948
2949 opt->type = type;
2950 opt->len = len;
2951
2952 switch (len) {
2953 case 1:
2954 *((u8 *) opt->val) = val;
2955 break;
2956
2957 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002958 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002959 break;
2960
2961 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002962 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002963 break;
2964
2965 default:
2966 memcpy(opt->val, (void *) val, len);
2967 break;
2968 }
2969
2970 *ptr += L2CAP_CONF_OPT_SIZE + len;
2971}
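/* Illustrative example with hypothetical values: l2cap_add_conf_opt(&ptr,
 * L2CAP_CONF_MTU, 2, 672) emits the option bytes 01 02 a0 02 (type, length,
 * then the 16-bit value in little-endian order) and advances ptr by
 * L2CAP_CONF_OPT_SIZE + 2; l2cap_get_conf_opt() above performs the matching
 * decode when a peer's configuration options are parsed.
 */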
2972
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002973static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
2974{
2975 struct l2cap_conf_efs efs;
2976
Szymon Janc1ec918c2011-11-16 09:32:21 +01002977 switch (chan->mode) {
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002978 case L2CAP_MODE_ERTM:
2979 efs.id = chan->local_id;
2980 efs.stype = chan->local_stype;
2981 efs.msdu = cpu_to_le16(chan->local_msdu);
2982 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07002983 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2984 efs.flush_to = cpu_to_le32(L2CAP_EFS_DEFAULT_FLUSH_TO);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03002985 break;
2986
2987 case L2CAP_MODE_STREAMING:
2988 efs.id = 1;
2989 efs.stype = L2CAP_SERV_BESTEFFORT;
2990 efs.msdu = cpu_to_le16(chan->local_msdu);
2991 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2992 efs.acc_lat = 0;
2993 efs.flush_to = 0;
2994 break;
2995
2996 default:
2997 return;
2998 }
2999
3000 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
Andrei Emeltchenko8936fa62012-10-08 11:14:41 +03003001 (unsigned long) &efs);
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003002}
3003
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003004static void l2cap_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003005{
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003006 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
Mat Martineau03625202012-05-17 20:53:51 -07003007 ack_timer.work);
3008 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003009
Gustavo F. Padovan2fb9b3d2011-12-22 16:56:05 -02003010 BT_DBG("chan %p", chan);
3011
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003012 l2cap_chan_lock(chan);
3013
Mat Martineau03625202012-05-17 20:53:51 -07003014 frames_to_ack = __seq_offset(chan, chan->buffer_seq,
3015 chan->last_acked_seq);
3016
3017 if (frames_to_ack)
3018 l2cap_send_rr_or_rnr(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003019
3020 l2cap_chan_unlock(chan);
Szymon Janc09bfb2e2012-01-11 10:59:49 +01003021 l2cap_chan_put(chan);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03003022}
3023
Andrei Emeltchenko466f8002012-05-29 13:59:01 +03003024int l2cap_ertm_init(struct l2cap_chan *chan)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003025{
Mat Martineau3c588192012-04-11 10:48:42 -07003026 int err;
3027
Mat Martineau105bdf92012-04-27 16:50:48 -07003028 chan->next_tx_seq = 0;
3029 chan->expected_tx_seq = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003030 chan->expected_ack_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003031 chan->unacked_frames = 0;
Gustavo F. Padovan42e5c802011-03-25 19:58:34 -03003032 chan->buffer_seq = 0;
Gustavo F. Padovan6a026612011-04-01 00:38:50 -03003033 chan->frames_sent = 0;
Mat Martineau105bdf92012-04-27 16:50:48 -07003034 chan->last_acked_seq = 0;
3035 chan->sdu = NULL;
3036 chan->sdu_last_frag = NULL;
3037 chan->sdu_len = 0;
3038
Mat Martineaud34c34f2012-05-14 14:49:27 -07003039 skb_queue_head_init(&chan->tx_q);
3040
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003041 chan->local_amp_id = AMP_ID_BREDR;
3042 chan->move_id = AMP_ID_BREDR;
Mat Martineau08333282012-10-23 15:24:06 -07003043 chan->move_state = L2CAP_MOVE_STABLE;
3044 chan->move_role = L2CAP_MOVE_ROLE_NONE;
3045
Mat Martineau105bdf92012-04-27 16:50:48 -07003046 if (chan->mode != L2CAP_MODE_ERTM)
3047 return 0;
3048
3049 chan->rx_state = L2CAP_RX_STATE_RECV;
3050 chan->tx_state = L2CAP_TX_STATE_XMIT;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003051
Gustavo F. Padovan721c4182011-06-23 19:29:58 -03003052 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
3053 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
3054 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003055
Gustavo F. Padovanf1c67752011-03-25 20:36:10 -03003056 skb_queue_head_init(&chan->srej_q);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03003057
Mat Martineau3c588192012-04-11 10:48:42 -07003058 err = l2cap_seq_list_init(&chan->srej_list, chan->tx_win);
3059 if (err < 0)
3060 return err;
3061
Mat Martineau9dc9aff2012-05-17 16:20:14 -07003062 err = l2cap_seq_list_init(&chan->retrans_list, chan->remote_tx_win);
3063 if (err < 0)
3064 l2cap_seq_list_free(&chan->srej_list);
3065
3066 return err;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003067}
3068
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003069static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3070{
3071 switch (mode) {
3072 case L2CAP_MODE_STREAMING:
3073 case L2CAP_MODE_ERTM:
3074 if (l2cap_mode_supported(mode, remote_feat_mask))
3075 return mode;
3076 /* fall through */
3077 default:
3078 return L2CAP_MODE_BASIC;
3079 }
3080}
3081
Marcel Holtmann848566b2013-10-01 22:59:22 -07003082static inline bool __l2cap_ews_supported(struct l2cap_conn *conn)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003083{
Marcel Holtmann848566b2013-10-01 22:59:22 -07003084 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003085}
3086
Marcel Holtmann848566b2013-10-01 22:59:22 -07003087static inline bool __l2cap_efs_supported(struct l2cap_conn *conn)
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003088{
Marcel Holtmann848566b2013-10-01 22:59:22 -07003089 return conn->hs_enabled && conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003090}
3091
Mat Martineau36c86c82012-10-23 15:24:20 -07003092static void __l2cap_set_ertm_timeouts(struct l2cap_chan *chan,
3093 struct l2cap_conf_rfc *rfc)
3094{
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003095 if (chan->local_amp_id != AMP_ID_BREDR && chan->hs_hcon) {
Mat Martineau36c86c82012-10-23 15:24:20 -07003096 u64 ertm_to = chan->hs_hcon->hdev->amp_be_flush_to;
3097
3098		 * Class 1 devices must have ERTM timeouts
3099 * exceeding the Link Supervision Timeout. The
3100 * default Link Supervision Timeout for AMP
3101 * controllers is 10 seconds.
3102 *
3103 * Class 1 devices use 0xffffffff for their
3104 * best-effort flush timeout, so the clamping logic
3105 * will result in a timeout that meets the above
3106 * requirement. ERTM timeouts are 16-bit values, so
3107 * the maximum timeout is 65.535 seconds.
3108 */
3109
3110 /* Convert timeout to milliseconds and round */
3111 ertm_to = DIV_ROUND_UP_ULL(ertm_to, 1000);
3112
3113 /* This is the recommended formula for class 2 devices
3114 * that start ERTM timers when packets are sent to the
3115 * controller.
3116 */
3117 ertm_to = 3 * ertm_to + 500;
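		/* Illustrative arithmetic, assuming amp_be_flush_to is in
		 * microseconds as the division above suggests: a class 1
		 * value of 0xffffffff rounds up to 4294968 ms, so
		 * 3 * 4294968 + 500 far exceeds 0xffff and the clamp below
		 * yields the maximum 16-bit timeout of 65.535 seconds.
		 */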
3118
3119 if (ertm_to > 0xffff)
3120 ertm_to = 0xffff;
3121
3122 rfc->retrans_timeout = cpu_to_le16((u16) ertm_to);
3123 rfc->monitor_timeout = rfc->retrans_timeout;
3124 } else {
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003125 rfc->retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3126 rfc->monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
Mat Martineau36c86c82012-10-23 15:24:20 -07003127 }
3128}
3129
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003130static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
3131{
3132 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
Marcel Holtmann848566b2013-10-01 22:59:22 -07003133 __l2cap_ews_supported(chan->conn)) {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003134 /* use extended control field */
3135 set_bit(FLAG_EXT_CTRL, &chan->flags);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003136 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
3137 } else {
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003138 chan->tx_win = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003139 L2CAP_DEFAULT_TX_WINDOW);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003140 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
3141 }
Mat Martineauc20f8e32012-07-10 05:47:07 -07003142 chan->ack_win = chan->tx_win;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003143}
3144
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003145static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003146{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003147 struct l2cap_conf_req *req = data;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003148 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
Linus Torvalds1da177e2005-04-16 15:20:36 -07003149 void *ptr = req->data;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003150 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003151
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003152 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003153
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003154 if (chan->num_conf_req || chan->num_conf_rsp)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003155 goto done;
3156
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003157 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003158 case L2CAP_MODE_STREAMING:
3159 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003160 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003161 break;
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003162
Marcel Holtmann848566b2013-10-01 22:59:22 -07003163 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003164 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3165
Gustavo F. Padovan2ba13ed2010-06-09 16:39:05 -03003166 /* fall through */
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003167 default:
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003168 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003169 break;
3170 }
3171
3172done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003173 if (chan->imtu != L2CAP_DEFAULT_MTU)
3174 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovan7990681c2011-01-24 16:01:43 -02003175
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003176 switch (chan->mode) {
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003177 case L2CAP_MODE_BASIC:
Marcel Holtmann6fea7ad2014-07-09 11:53:35 +02003178 if (disable_ertm)
3179 break;
3180
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003181 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003182 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003183 break;
3184
Gustavo F. Padovan62547752010-06-08 20:05:31 -03003185 rfc.mode = L2CAP_MODE_BASIC;
3186 rfc.txwin_size = 0;
3187 rfc.max_transmit = 0;
3188 rfc.retrans_timeout = 0;
3189 rfc.monitor_timeout = 0;
3190 rfc.max_pdu_size = 0;
3191
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003192 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003193 (unsigned long) &rfc);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003194 break;
3195
3196 case L2CAP_MODE_ERTM:
3197 rfc.mode = L2CAP_MODE_ERTM;
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003198 rfc.max_transmit = chan->max_tx;
Mat Martineau36c86c82012-10-23 15:24:20 -07003199
3200 __l2cap_set_ertm_timeouts(chan, &rfc);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003201
3202 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003203 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3204 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003205 rfc.max_pdu_size = cpu_to_le16(size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003206
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003207 l2cap_txwin_setup(chan);
3208
3209 rfc.txwin_size = min_t(u16, chan->tx_win,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003210 L2CAP_DEFAULT_TX_WINDOW);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003211
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003212 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003213 (unsigned long) &rfc);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003214
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003215 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3216 l2cap_add_opt_efs(&ptr, chan);
3217
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003218 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3219 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003220 chan->tx_win);
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003221
3222 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3223 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003224 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003225 chan->fcs = L2CAP_FCS_NONE;
3226 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3227 chan->fcs);
3228 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003229 break;
3230
3231 case L2CAP_MODE_STREAMING:
Mat Martineau273759e2012-05-17 20:53:53 -07003232 l2cap_txwin_setup(chan);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003233 rfc.mode = L2CAP_MODE_STREAMING;
3234 rfc.txwin_size = 0;
3235 rfc.max_transmit = 0;
3236 rfc.retrans_timeout = 0;
3237 rfc.monitor_timeout = 0;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003238
3239 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
Gustavo Padovan2d792812012-10-06 10:07:01 +01003240 L2CAP_EXT_HDR_SIZE - L2CAP_SDULEN_SIZE -
3241 L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003242 rfc.max_pdu_size = cpu_to_le16(size);
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003243
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003244 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003245 (unsigned long) &rfc);
Gustavo F. Padovan63406502010-08-03 23:49:29 -03003246
Andrei Emeltchenkof89cef02011-10-13 16:18:55 +03003247 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
3248 l2cap_add_opt_efs(&ptr, chan);
3249
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003250 if (chan->conn->feat_mask & L2CAP_FEAT_FCS)
3251 if (chan->fcs == L2CAP_FCS_NONE ||
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003252 test_bit(CONF_RECV_NO_FCS, &chan->conf_state)) {
Andrei Emeltchenko60918912012-11-29 17:46:06 +02003253 chan->fcs = L2CAP_FCS_NONE;
3254 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
3255 chan->fcs);
3256 }
Marcel Holtmann65c7c492009-05-02 23:07:53 -07003257 break;
3258 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003259
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003260 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003261 req->flags = cpu_to_le16(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003262
3263 return ptr - data;
3264}
3265
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003266static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003267{
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003268 struct l2cap_conf_rsp *rsp = data;
3269 void *ptr = rsp->data;
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003270 void *req = chan->conf_req;
3271 int len = chan->conf_len;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003272 int type, hint, olen;
3273 unsigned long val;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003274 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003275 struct l2cap_conf_efs efs;
3276 u8 remote_efs = 0;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003277 u16 mtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003278 u16 result = L2CAP_CONF_SUCCESS;
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003279 u16 size;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003280
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003281 BT_DBG("chan %p", chan);
Marcel Holtmann820ae1b2006-11-18 22:15:00 +01003282
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003283 while (len >= L2CAP_CONF_OPT_SIZE) {
3284 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003285
Gustavo F. Padovan589d2742009-04-20 01:31:07 -03003286 hint = type & L2CAP_CONF_HINT;
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07003287 type &= L2CAP_CONF_MASK;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003288
3289 switch (type) {
3290 case L2CAP_CONF_MTU:
Marcel Holtmann861d6882007-10-20 13:37:06 +02003291 mtu = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003292 break;
3293
3294 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003295 chan->flush_to = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003296 break;
3297
3298 case L2CAP_CONF_QOS:
3299 break;
3300
Marcel Holtmann6464f352007-10-20 13:39:51 +02003301 case L2CAP_CONF_RFC:
3302 if (olen == sizeof(rfc))
3303 memcpy(&rfc, (void *) val, olen);
3304 break;
3305
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003306 case L2CAP_CONF_FCS:
3307 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003308 set_bit(CONF_RECV_NO_FCS, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003309 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003310
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003311 case L2CAP_CONF_EFS:
3312 remote_efs = 1;
3313 if (olen == sizeof(efs))
3314 memcpy(&efs, (void *) val, olen);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003315 break;
3316
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003317 case L2CAP_CONF_EWS:
Marcel Holtmann848566b2013-10-01 22:59:22 -07003318 if (!chan->conn->hs_enabled)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003319 return -ECONNREFUSED;
3320
3321 set_bit(FLAG_EXT_CTRL, &chan->flags);
3322 set_bit(CONF_EWS_RECV, &chan->conf_state);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03003323 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003324 chan->remote_tx_win = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003325 break;
3326
3327 default:
3328 if (hint)
3329 break;
3330
3331 result = L2CAP_CONF_UNKNOWN;
3332 *((u8 *) ptr++) = type;
3333 break;
3334 }
3335 }
3336
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003337 if (chan->num_conf_rsp || chan->num_conf_req > 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003338 goto done;
3339
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003340 switch (chan->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003341 case L2CAP_MODE_STREAMING:
3342 case L2CAP_MODE_ERTM:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003343 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003344 chan->mode = l2cap_select_mode(rfc.mode,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003345 chan->conn->feat_mask);
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003346 break;
3347 }
3348
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003349 if (remote_efs) {
Marcel Holtmann848566b2013-10-01 22:59:22 -07003350 if (__l2cap_efs_supported(chan->conn))
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003351 set_bit(FLAG_EFS_ENABLE, &chan->flags);
3352 else
3353 return -ECONNREFUSED;
3354 }
3355
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003356 if (chan->mode != rfc.mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003357 return -ECONNREFUSED;
Gustavo F. Padovan742e5192010-06-08 19:09:48 -03003358
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003359 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003360 }
3361
3362done:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003363 if (chan->mode != rfc.mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003364 result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003365 rfc.mode = chan->mode;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003366
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003367 if (chan->num_conf_rsp == 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003368 return -ECONNREFUSED;
3369
Gustavo Padovan2d792812012-10-06 10:07:01 +01003370 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3371 (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003372 }
3373
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003374 if (result == L2CAP_CONF_SUCCESS) {
3375 /* Configure output options and let the other side know
3376 * which ones we don't like. */
3377
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003378 if (mtu < L2CAP_DEFAULT_MIN_MTU)
3379 result = L2CAP_CONF_UNACCEPT;
3380 else {
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003381 chan->omtu = mtu;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003382 set_bit(CONF_MTU_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003383 }
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003384 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003385
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003386 if (remote_efs) {
3387 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003388 efs.stype != L2CAP_SERV_NOTRAFIC &&
3389 efs.stype != chan->local_stype) {
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003390
3391 result = L2CAP_CONF_UNACCEPT;
3392
3393 if (chan->num_conf_req >= 1)
3394 return -ECONNREFUSED;
3395
3396 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003397 sizeof(efs),
3398 (unsigned long) &efs);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003399 } else {
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003400 /* Send PENDING Conf Rsp */
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003401 result = L2CAP_CONF_PENDING;
3402 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003403 }
3404 }
3405
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003406 switch (rfc.mode) {
3407 case L2CAP_MODE_BASIC:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003408 chan->fcs = L2CAP_FCS_NONE;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003409 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003410 break;
3411
3412 case L2CAP_MODE_ERTM:
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003413 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
3414 chan->remote_tx_win = rfc.txwin_size;
3415 else
3416 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
3417
Gustavo F. Padovan2c03a7a2011-03-25 20:15:28 -03003418 chan->remote_max_tx = rfc.max_transmit;
Mat Martineau86b1b262010-08-05 15:54:22 -07003419
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003420 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003421 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3422 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003423 rfc.max_pdu_size = cpu_to_le16(size);
3424 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003425
Mat Martineau36c86c82012-10-23 15:24:20 -07003426 __l2cap_set_ertm_timeouts(chan, &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003427
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003428 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003429
3430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003431 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003432
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003433 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3434 chan->remote_id = efs.id;
3435 chan->remote_stype = efs.stype;
3436 chan->remote_msdu = le16_to_cpu(efs.msdu);
3437 chan->remote_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003438 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003439 chan->remote_acc_lat =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003440 le32_to_cpu(efs.acc_lat);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003441 chan->remote_sdu_itime =
3442 le32_to_cpu(efs.sdu_itime);
3443 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003444 sizeof(efs),
3445 (unsigned long) &efs);
Andrei Emeltchenko42dceae2011-10-17 14:35:30 +03003446 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003447 break;
3448
3449 case L2CAP_MODE_STREAMING:
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003450 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
Gustavo Padovan2d792812012-10-06 10:07:01 +01003451 chan->conn->mtu - L2CAP_EXT_HDR_SIZE -
3452 L2CAP_SDULEN_SIZE - L2CAP_FCS_SIZE);
Andrei Emeltchenkoc8f79162011-10-17 12:19:59 +03003453 rfc.max_pdu_size = cpu_to_le16(size);
3454 chan->remote_mps = size;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003455
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003456 set_bit(CONF_MODE_DONE, &chan->conf_state);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003457
Gustavo Padovan2d792812012-10-06 10:07:01 +01003458 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
3459 (unsigned long) &rfc);
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003460
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003461 break;
3462
3463 default:
Marcel Holtmann6464f352007-10-20 13:39:51 +02003464 result = L2CAP_CONF_UNACCEPT;
3465
3466 memset(&rfc, 0, sizeof(rfc));
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003467 rfc.mode = chan->mode;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003468 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003469
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003470 if (result == L2CAP_CONF_SUCCESS)
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003471 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003472 }
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003473 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003474 rsp->result = cpu_to_le16(result);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003475 rsp->flags = cpu_to_le16(0);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003476
3477 return ptr - data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003478}
3479
Gustavo Padovan2d792812012-10-06 10:07:01 +01003480static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
3481 void *data, u16 *result)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003482{
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003483 struct l2cap_conf_req *req = data;
3484 void *ptr = req->data;
3485 int type, olen;
3486 unsigned long val;
Mat Martineau36e999a2011-12-08 17:23:21 -08003487 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003488 struct l2cap_conf_efs efs;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003489
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003490 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003491
3492 while (len >= L2CAP_CONF_OPT_SIZE) {
3493 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3494
3495 switch (type) {
3496 case L2CAP_CONF_MTU:
3497 if (val < L2CAP_DEFAULT_MIN_MTU) {
3498 *result = L2CAP_CONF_UNACCEPT;
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003499 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003500 } else
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003501 chan->imtu = val;
3502 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003503 break;
3504
3505 case L2CAP_CONF_FLUSH_TO:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003506 chan->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003508 2, chan->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003509 break;
3510
3511 case L2CAP_CONF_RFC:
3512 if (olen == sizeof(rfc))
3513 memcpy(&rfc, (void *)val, olen);
3514
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003515 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003516 rfc.mode != chan->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003517 return -ECONNREFUSED;
3518
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003519 chan->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003520
3521 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003522 sizeof(rfc), (unsigned long) &rfc);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003523 break;
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003524
3525 case L2CAP_CONF_EWS:
Mat Martineauc20f8e32012-07-10 05:47:07 -07003526 chan->ack_win = min_t(u16, val, chan->ack_win);
Gustavo F. Padovan3e6b3b92011-11-01 14:06:23 -02003527 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
Mat Martineauc20f8e32012-07-10 05:47:07 -07003528 chan->tx_win);
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03003529 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003530
3531 case L2CAP_CONF_EFS:
3532 if (olen == sizeof(efs))
3533 memcpy(&efs, (void *)val, olen);
3534
3535 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003536 efs.stype != L2CAP_SERV_NOTRAFIC &&
3537 efs.stype != chan->local_stype)
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003538 return -ECONNREFUSED;
3539
Gustavo Padovan2d792812012-10-06 10:07:01 +01003540 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS, sizeof(efs),
3541 (unsigned long) &efs);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003542 break;
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003543
3544 case L2CAP_CONF_FCS:
3545 if (*result == L2CAP_CONF_PENDING)
3546 if (val == L2CAP_FCS_NONE)
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003547 set_bit(CONF_RECV_NO_FCS,
Andrei Emeltchenkocbabee72012-11-29 17:46:07 +02003548 &chan->conf_state);
3549 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003550 }
3551 }
3552
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003553 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003554 return -ECONNREFUSED;
3555
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003556 chan->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003557
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03003558 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003559 switch (rfc.mode) {
3560 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003561 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3562 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3563 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003564 if (!test_bit(FLAG_EXT_CTRL, &chan->flags))
3565 chan->ack_win = min_t(u16, chan->ack_win,
3566 rfc.txwin_size);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003567
3568 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
3569 chan->local_msdu = le16_to_cpu(efs.msdu);
3570 chan->local_sdu_itime =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003571 le32_to_cpu(efs.sdu_itime);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003572 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
3573 chan->local_flush_to =
Gustavo Padovan2d792812012-10-06 10:07:01 +01003574 le32_to_cpu(efs.flush_to);
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003575 }
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003576 break;
Andrei Emeltchenko66af7aa2011-11-07 14:20:33 +02003577
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003578 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003579 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003580 }
3581 }
3582
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003583 req->dcid = cpu_to_le16(chan->dcid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003584 req->flags = cpu_to_le16(0);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003585
3586 return ptr - data;
3587}
3588
Gustavo Padovan2d792812012-10-06 10:07:01 +01003589static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data,
3590 u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003591{
3592 struct l2cap_conf_rsp *rsp = data;
3593 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003594
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003595 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003596
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003597 rsp->scid = cpu_to_le16(chan->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003598 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003599 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003600
3601 return ptr - data;
3602}
3603
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003604void __l2cap_le_connect_rsp_defer(struct l2cap_chan *chan)
3605{
3606 struct l2cap_le_conn_rsp rsp;
3607 struct l2cap_conn *conn = chan->conn;
3608
3609 BT_DBG("chan %p", chan);
3610
3611 rsp.dcid = cpu_to_le16(chan->scid);
3612 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02003613 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03003614 rsp.credits = cpu_to_le16(chan->rx_credits);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003615 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03003616
3617 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CONN_RSP, sizeof(rsp),
3618 &rsp);
3619}
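/* The credits value advertised in this deferred response is the initial
 * receive window for LE credit based flow control: the peer may send up to
 * chan->rx_credits LE-frames, each carrying at most chan->mps bytes, before
 * it must wait for an L2CAP_LE_CREDITS packet returning more credits.
 */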
3620
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003621void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003622{
3623 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan8c1d7872011-04-13 20:23:55 -03003624 struct l2cap_conn *conn = chan->conn;
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003625 u8 buf[128];
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003626 u8 rsp_code;
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003627
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003628 rsp.scid = cpu_to_le16(chan->dcid);
3629 rsp.dcid = cpu_to_le16(chan->scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003630 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
3631 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko439f34a2012-10-31 15:46:28 +02003632
3633 if (chan->hs_hcon)
3634 rsp_code = L2CAP_CREATE_CHAN_RSP;
3635 else
3636 rsp_code = L2CAP_CONN_RSP;
3637
3638 BT_DBG("chan %p rsp_code %u", chan, rsp_code);
3639
3640 l2cap_send_cmd(conn, chan->ident, rsp_code, sizeof(rsp), &rsp);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003641
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003642 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003643 return;
3644
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003645 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003646 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003647 chan->num_conf_req++;
3648}
3649
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003650static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003651{
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003652 int type, olen;
3653 unsigned long val;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003654 /* Use sane default values in case a misbehaving remote device
3655 * did not send an RFC or extended window size option.
3656 */
3657 u16 txwin_ext = chan->ack_win;
3658 struct l2cap_conf_rfc rfc = {
3659 .mode = chan->mode,
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003660 .retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO),
3661 .monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO),
Mat Martineauc20f8e32012-07-10 05:47:07 -07003662 .max_pdu_size = cpu_to_le16(chan->imtu),
3663 .txwin_size = min_t(u16, chan->ack_win, L2CAP_DEFAULT_TX_WINDOW),
3664 };
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003665
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003666 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003667
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003668 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003669 return;
3670
3671 while (len >= L2CAP_CONF_OPT_SIZE) {
3672 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3673
Mat Martineauc20f8e32012-07-10 05:47:07 -07003674 switch (type) {
3675 case L2CAP_CONF_RFC:
3676 if (olen == sizeof(rfc))
3677 memcpy(&rfc, (void *)val, olen);
Szymon Janc8f321f82012-06-08 11:33:33 +02003678 break;
Mat Martineauc20f8e32012-07-10 05:47:07 -07003679 case L2CAP_CONF_EWS:
3680 txwin_ext = val;
3681 break;
3682 }
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003683 }
3684
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003685 switch (rfc.mode) {
3686 case L2CAP_MODE_ERTM:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003687 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3688 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
Mat Martineauc20f8e32012-07-10 05:47:07 -07003689 chan->mps = le16_to_cpu(rfc.max_pdu_size);
3690 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3691 chan->ack_win = min_t(u16, chan->ack_win, txwin_ext);
3692 else
3693 chan->ack_win = min_t(u16, chan->ack_win,
3694 rfc.txwin_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003695 break;
3696 case L2CAP_MODE_STREAMING:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003697 chan->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003698 }
3699}
3700
Gustavo Padovan2d792812012-10-06 10:07:01 +01003701static inline int l2cap_command_rej(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003702 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3703 u8 *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003704{
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003705 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003706
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003707 if (cmd_len < sizeof(*rej))
3708 return -EPROTO;
3709
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03003710 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003711 return 0;
3712
3713 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003714 cmd->ident == conn->info_ident) {
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02003715 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003716
3717 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003718 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003719
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003720 l2cap_conn_start(conn);
3721 }
3722
3723 return 0;
3724}
3725
Mat Martineau17009152012-10-23 15:24:07 -07003726static struct l2cap_chan *l2cap_connect(struct l2cap_conn *conn,
3727 struct l2cap_cmd_hdr *cmd,
3728 u8 *data, u8 rsp_code, u8 amp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003729{
Linus Torvalds1da177e2005-04-16 15:20:36 -07003730 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3731 struct l2cap_conn_rsp rsp;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003732 struct l2cap_chan *chan = NULL, *pchan;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003733 int result, status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003734
3735 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003736 __le16 psm = req->psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003737
Andrei Emeltchenko097db762012-03-09 14:16:17 +02003738 BT_DBG("psm 0x%2.2x scid 0x%4.4x", __le16_to_cpu(psm), scid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003739
3740 /* Check if we have a socket listening on psm */
Marcel Holtmann6f59b902013-10-13 05:24:01 -07003741 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
Johan Hedbergbf20fd42013-05-14 13:23:13 +03003742 &conn->hcon->dst, ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03003743 if (!pchan) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003744 result = L2CAP_CR_BAD_PSM;
3745 goto sendresp;
3746 }
3747
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003748 mutex_lock(&conn->chan_lock);
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003749 l2cap_chan_lock(pchan);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00003750
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003751 /* Check if the ACL is secure enough (if not SDP) */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003752 if (psm != cpu_to_le16(L2CAP_PSM_SDP) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003753 !hci_conn_check_link_mode(conn->hcon)) {
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02003754 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003755 result = L2CAP_CR_SEC_BLOCK;
3756 goto response;
3757 }
3758
Linus Torvalds1da177e2005-04-16 15:20:36 -07003759 result = L2CAP_CR_NO_MEM;
3760
Gustavo Padovan2dfa1002012-05-27 22:27:58 -03003761 /* Check if we already have a channel with that dcid */
3762 if (__l2cap_get_chan_by_dcid(conn, scid))
3763 goto response;
3764
Gustavo Padovan80b98022012-05-27 22:27:51 -03003765 chan = pchan->ops->new_connection(pchan);
Gustavo F. Padovan80808e42011-05-16 17:24:37 -03003766 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003767 goto response;
3768
Syam Sidhardhan330b6c12013-08-06 01:59:12 +09003769 /* For certain devices (e.g. a HID mouse), support for authentication,
3770 * pairing and bonding is optional. For such devices, in order to avoid
3771 * keeping the ACL alive for too long after L2CAP disconnection, reset
3772 * the ACL disc_timeout back to HCI_DISCONN_TIMEOUT during L2CAP connect.
3773 */
3774 conn->hcon->disc_timeout = HCI_DISCONN_TIMEOUT;
3775
Marcel Holtmann7eafc592013-10-13 08:12:47 -07003776 bacpy(&chan->src, &conn->hcon->src);
3777 bacpy(&chan->dst, &conn->hcon->dst);
Marcel Holtmann4f1654e2013-10-13 08:50:41 -07003778 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
3779 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003780 chan->psm = psm;
3781 chan->dcid = scid;
Mat Martineau17009152012-10-23 15:24:07 -07003782 chan->local_amp_id = amp_id;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003784 __l2cap_chan_add(conn, chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003785
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003786 dcid = chan->scid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787
Gustavo Padovan8d836d72013-10-15 19:24:47 -03003788 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
Linus Torvalds1da177e2005-04-16 15:20:36 -07003789
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003790 chan->ident = cmd->ident;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003791
Marcel Holtmann984947d2009-02-06 23:35:19 +01003792 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
Johan Hedberge7cafc42014-07-17 15:35:38 +03003793 if (l2cap_chan_check_security(chan, false)) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07003794 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003795 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003796 result = L2CAP_CR_PEND;
3797 status = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08003798 chan->ops->defer(chan);
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003799 } else {
Mat Martineau17009152012-10-23 15:24:07 -07003800 /* Force pending result for AMP controllers.
3801 * The connection will succeed after the
3802 * physical link is up.
3803 */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003804 if (amp_id == AMP_ID_BREDR) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003805 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau17009152012-10-23 15:24:07 -07003806 result = L2CAP_CR_SUCCESS;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003807 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003808 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07003809 result = L2CAP_CR_PEND;
Mat Martineau17009152012-10-23 15:24:07 -07003810 }
Marcel Holtmannf66dc812009-01-15 21:57:00 +01003811 status = L2CAP_CS_NO_INFO;
3812 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003813 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003814 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003815 result = L2CAP_CR_PEND;
3816 status = L2CAP_CS_AUTHEN_PEND;
3817 }
3818 } else {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02003819 l2cap_state_change(chan, BT_CONNECT2);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003820 result = L2CAP_CR_PEND;
3821 status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003822 }
3823
Linus Torvalds1da177e2005-04-16 15:20:36 -07003824response:
Gustavo Padovan8ffb9292013-10-21 14:21:41 -02003825 l2cap_chan_unlock(pchan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003826 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03003827 l2cap_chan_put(pchan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003828
3829sendresp:
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003830 rsp.scid = cpu_to_le16(scid);
3831 rsp.dcid = cpu_to_le16(dcid);
3832 rsp.result = cpu_to_le16(result);
3833 rsp.status = cpu_to_le16(status);
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003834 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003835
3836 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3837 struct l2cap_info_req info;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003838 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003839
3840 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3841 conn->info_ident = l2cap_get_ident(conn);
3842
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08003843 schedule_delayed_work(&conn->info_timer, L2CAP_INFO_TIMEOUT);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003844
Gustavo Padovan2d792812012-10-06 10:07:01 +01003845 l2cap_send_cmd(conn, conn->info_ident, L2CAP_INFO_REQ,
3846 sizeof(info), &info);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02003847 }
3848
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003849 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
Gustavo Padovan2d792812012-10-06 10:07:01 +01003850 result == L2CAP_CR_SUCCESS) {
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003851 u8 buf[128];
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003852 set_bit(CONF_REQ_SENT, &chan->conf_state);
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003853 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003854 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003855 chan->num_conf_req++;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003856 }
Mat Martineau17009152012-10-23 15:24:07 -07003857
3858 return chan;
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003859}
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003860
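/* Connection Request handler: report the new device to the management
 * interface if needed, then let l2cap_connect() set up the channel and
 * send the Connection Response.
 */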
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003861static int l2cap_connect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003862 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
Mat Martineau4c89b6a2012-10-11 17:48:22 +03003863{
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303864 struct hci_dev *hdev = conn->hcon->hdev;
3865 struct hci_conn *hcon = conn->hcon;
3866
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003867 if (cmd_len < sizeof(struct l2cap_conn_req))
3868 return -EPROTO;
3869
Jaganath Kanakkassery7b064ed2013-01-10 10:28:35 +05303870 hci_dev_lock(hdev);
3871 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
3872 !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &hcon->flags))
3873 mgmt_device_connected(hdev, &hcon->dst, hcon->type,
3874 hcon->dst_type, 0, NULL, 0,
3875 hcon->dev_class);
3876 hci_dev_unlock(hdev);
3877
Gustavo Padovan300229f2012-10-12 19:40:40 +08003878 l2cap_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003879 return 0;
3880}
3881
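/* Handle a Connection Response or Create Channel Response: look up the
 * channel by scid or command ident and either start configuration, keep
 * waiting on a pending result, or delete the channel on failure.
 */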
Mat Martineau5909cf32012-10-23 15:24:08 -07003882static int l2cap_connect_create_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003883 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3884 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003885{
3886 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3887 u16 scid, dcid, result, status;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003888 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003889 u8 req[128];
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003890 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003891
Johan Hedbergcb3b3152013-05-28 13:46:30 +03003892 if (cmd_len < sizeof(*rsp))
3893 return -EPROTO;
3894
Linus Torvalds1da177e2005-04-16 15:20:36 -07003895 scid = __le16_to_cpu(rsp->scid);
3896 dcid = __le16_to_cpu(rsp->dcid);
3897 result = __le16_to_cpu(rsp->result);
3898 status = __le16_to_cpu(rsp->status);
3899
Andrei Emeltchenko1b009c92012-02-21 12:54:54 +02003900 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01003901 dcid, scid, result, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003902
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003903 mutex_lock(&conn->chan_lock);
3904
Linus Torvalds1da177e2005-04-16 15:20:36 -07003905 if (scid) {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003906 chan = __l2cap_get_chan_by_scid(conn, scid);
3907 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003908 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003909 goto unlock;
3910 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003911 } else {
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003912 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
3913 if (!chan) {
Johan Hedberg21870b52013-09-16 13:05:14 +03003914 err = -EBADSLT;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003915 goto unlock;
3916 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07003917 }
3918
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003919 err = 0;
3920
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003921 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003922
Linus Torvalds1da177e2005-04-16 15:20:36 -07003923 switch (result) {
3924 case L2CAP_CR_SUCCESS:
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03003925 l2cap_state_change(chan, BT_CONFIG);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03003926 chan->ident = 0;
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03003927 chan->dcid = dcid;
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003928 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01003929
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003930 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03003931 break;
3932
Linus Torvalds1da177e2005-04-16 15:20:36 -07003933 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01003934 l2cap_build_conf_req(chan, req), req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03003935 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003936 break;
3937
3938 case L2CAP_CR_PEND:
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03003939 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003940 break;
3941
3942 default:
Gustavo F. Padovan48454072011-03-25 00:22:30 -03003943 l2cap_chan_del(chan, ECONNREFUSED);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003944 break;
3945 }
3946
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02003947 l2cap_chan_unlock(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02003948
3949unlock:
3950 mutex_unlock(&conn->chan_lock);
3951
3952 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003953}
3954
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003955static inline void set_default_fcs(struct l2cap_chan *chan)
Mat Martineau8c462b62010-08-24 15:35:42 -07003956{
3957 /* FCS is enabled only in ERTM or streaming mode, if one or both
3958 * sides request it.
3959 */
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03003960 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003961 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenkof2592d32012-11-29 17:46:08 +02003962 else if (!test_bit(CONF_RECV_NO_FCS, &chan->conf_state))
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03003963 chan->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07003964}
3965
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03003966static void l2cap_send_efs_conf_rsp(struct l2cap_chan *chan, void *data,
3967 u8 ident, u16 flags)
3968{
3969 struct l2cap_conn *conn = chan->conn;
3970
3971 BT_DBG("conn %p chan %p ident %d flags 0x%4.4x", conn, chan, ident,
3972 flags);
3973
3974 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
3975 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
3976
3977 l2cap_send_cmd(conn, ident, L2CAP_CONF_RSP,
3978 l2cap_build_conf_rsp(chan, data,
3979 L2CAP_CONF_SUCCESS, flags), data);
3980}
3981
Johan Hedberg662d6522013-10-16 11:20:47 +03003982static void cmd_reject_invalid_cid(struct l2cap_conn *conn, u8 ident,
3983 u16 scid, u16 dcid)
3984{
3985 struct l2cap_cmd_rej_cid rej;
3986
Joe Perchesdcf4adb2014-03-12 10:52:35 -07003987 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
Johan Hedberg662d6522013-10-16 11:20:47 +03003988 rej.scid = __cpu_to_le16(scid);
3989 rej.dcid = __cpu_to_le16(dcid);
3990
3991 l2cap_send_cmd(conn, ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3992}
3993
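/* Handle a Configure Request: accumulate the options in chan->conf_req,
 * parse them once the continuation flag is cleared and send back a
 * Configure Response.
 */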
Gustavo Padovan2d792812012-10-06 10:07:01 +01003994static inline int l2cap_config_req(struct l2cap_conn *conn,
3995 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3996 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003997{
3998 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3999 u16 dcid, flags;
4000 u8 rsp[64];
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004001 struct l2cap_chan *chan;
Mat Martineau3c588192012-04-11 10:48:42 -07004002 int len, err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004003
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004004 if (cmd_len < sizeof(*req))
4005 return -EPROTO;
4006
Linus Torvalds1da177e2005-04-16 15:20:36 -07004007 dcid = __le16_to_cpu(req->dcid);
4008 flags = __le16_to_cpu(req->flags);
4009
4010 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4011
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004012 chan = l2cap_get_chan_by_scid(conn, dcid);
Johan Hedberg662d6522013-10-16 11:20:47 +03004013 if (!chan) {
4014 cmd_reject_invalid_cid(conn, cmd->ident, dcid, 0);
4015 return 0;
4016 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017
David S. Miller033b1142011-07-21 13:38:42 -07004018 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
Johan Hedberg662d6522013-10-16 11:20:47 +03004019 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4020 chan->dcid);
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004021 goto unlock;
Gustavo F. Padovandf6bd742010-06-14 02:26:15 -03004022 }
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004023
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004024 /* Reject if config buffer is too small. */
Al Viro88219a02007-07-29 00:17:25 -07004025 len = cmd_len - sizeof(*req);
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004026 if (chan->conf_len + len > sizeof(chan->conf_req)) {
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004027 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004028 l2cap_build_conf_rsp(chan, rsp,
4029 L2CAP_CONF_REJECT, flags), rsp);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004030 goto unlock;
4031 }
4032
4033 /* Store config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004034 memcpy(chan->conf_req + chan->conf_len, req->data, len);
4035 chan->conf_len += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004036
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004037 if (flags & L2CAP_CONF_FLAG_CONTINUATION) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004038 /* Incomplete config. Send empty response. */
4039 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004040 l2cap_build_conf_rsp(chan, rsp,
4041 L2CAP_CONF_SUCCESS, flags), rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004042 goto unlock;
4043 }
4044
4045 /* Complete config. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004046 len = l2cap_parse_conf_req(chan, rsp);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004047 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004048 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 goto unlock;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004050 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051
Mat Martineau1500109b2012-10-23 15:24:15 -07004052 chan->ident = cmd->ident;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004053 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004054 chan->num_conf_rsp++;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004055
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004056 /* Reset config buffer. */
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004057 chan->conf_len = 0;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004058
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004059 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
Marcel Holtmann876d9482007-10-20 13:35:42 +02004060 goto unlock;
4061
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004062 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004063 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004064
Mat Martineau105bdf92012-04-27 16:50:48 -07004065 if (chan->mode == L2CAP_MODE_ERTM ||
4066 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004067 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004068
Mat Martineau3c588192012-04-11 10:48:42 -07004069 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004070 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004071 else
4072 l2cap_chan_ready(chan);
4073
Marcel Holtmann876d9482007-10-20 13:35:42 +02004074 goto unlock;
4075 }
4076
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004077 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004078 u8 buf[64];
Linus Torvalds1da177e2005-04-16 15:20:36 -07004079 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004080 l2cap_build_conf_req(chan, buf), buf);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004081 chan->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004082 }
4083
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004084 /* Got Conf Rsp PENDING from the remote side and assume we sent
4085 Conf Rsp PENDING in the code above */
4086 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
Andrei Emeltchenko29d8a592012-09-21 12:30:05 +03004087 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004088
4089 /* check compatibility */
4090
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004091 /* Send rsp for BR/EDR channel */
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004092 if (!chan->hs_hcon)
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004093 l2cap_send_efs_conf_rsp(chan, rsp, cmd->ident, flags);
4094 else
4095 chan->ident = cmd->ident;
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004096 }
4097
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098unlock:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004099 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004100 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004101}
4102
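/* Handle a Configure Response: apply the remote settings on success or
 * pending results, retry with a new request on an unacceptable result,
 * and disconnect the channel on any other result.
 */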
Gustavo Padovan2d792812012-10-06 10:07:01 +01004103static inline int l2cap_config_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004104 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4105 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004106{
4107 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4108 u16 scid, flags, result;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004109 struct l2cap_chan *chan;
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004110 int len = cmd_len - sizeof(*rsp);
Mat Martineau3c588192012-04-11 10:48:42 -07004111 int err = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004112
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004113 if (cmd_len < sizeof(*rsp))
4114 return -EPROTO;
4115
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116 scid = __le16_to_cpu(rsp->scid);
4117 flags = __le16_to_cpu(rsp->flags);
4118 result = __le16_to_cpu(rsp->result);
4119
Andrei Emeltchenko61386cb2012-03-12 12:13:07 +02004120 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x len %d", scid, flags,
4121 result, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004122
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03004123 chan = l2cap_get_chan_by_scid(conn, scid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004124 if (!chan)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004125 return 0;
4126
4127 switch (result) {
4128 case L2CAP_CONF_SUCCESS:
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004129 l2cap_conf_rfc_get(chan, rsp->data, len);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004130 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004131 break;
4132
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004133 case L2CAP_CONF_PENDING:
4134 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
4135
4136 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
4137 char buf[64];
4138
4139 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004140 buf, &result);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004141 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004142 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004143 goto done;
4144 }
4145
Andrei Emeltchenkof351bc72012-10-31 15:46:35 +02004146 if (!chan->hs_hcon) {
Andrei Emeltchenko79de8862012-10-15 11:58:42 +03004147 l2cap_send_efs_conf_rsp(chan, buf, cmd->ident,
4148 0);
Andrei Emeltchenko5ce66b52012-10-31 15:46:30 +02004149 } else {
4150 if (l2cap_check_efs(chan)) {
4151 amp_create_logical_link(chan);
4152 chan->ident = cmd->ident;
4153 }
4154 }
Andrei Emeltchenko0e8b2072011-10-17 14:35:32 +03004155 }
4156 goto done;
4157
Linus Torvalds1da177e2005-04-16 15:20:36 -07004158 case L2CAP_CONF_UNACCEPT:
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004159 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004160 char req[64];
4161
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004162 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004163 l2cap_send_disconn_req(chan, ECONNRESET);
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004164 goto done;
4165 }
4166
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004167 /* throw out any old stored conf requests */
4168 result = L2CAP_CONF_SUCCESS;
Gustavo F. Padovanb4450032011-04-12 18:15:09 -03004169 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004170 req, &result);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004171 if (len < 0) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004172 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004173 goto done;
4174 }
4175
4176 l2cap_send_cmd(conn, l2cap_get_ident(conn),
Gustavo Padovan2d792812012-10-06 10:07:01 +01004177 L2CAP_CONF_REQ, len, req);
Gustavo F. Padovan73ffa902011-03-25 14:16:54 -03004178 chan->num_conf_req++;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004179 if (result != L2CAP_CONF_SUCCESS)
4180 goto done;
4181 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004182 }
4183
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004184 default:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004185 l2cap_chan_set_err(chan, ECONNRESET);
Andrei Emeltchenko2e0052e2012-02-21 12:54:58 +02004186
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08004187 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004188 l2cap_send_disconn_req(chan, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004189 goto done;
4190 }
4191
Andrei Emeltchenko59e54bd2012-05-23 15:44:06 +03004192 if (flags & L2CAP_CONF_FLAG_CONTINUATION)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004193 goto done;
4194
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004195 set_bit(CONF_INPUT_DONE, &chan->conf_state);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196
Gustavo F. Padovanc1360a12011-06-10 17:02:12 -03004197 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03004198 set_default_fcs(chan);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004199
Mat Martineau105bdf92012-04-27 16:50:48 -07004200 if (chan->mode == L2CAP_MODE_ERTM ||
4201 chan->mode == L2CAP_MODE_STREAMING)
Mat Martineau3c588192012-04-11 10:48:42 -07004202 err = l2cap_ertm_init(chan);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004203
Mat Martineau3c588192012-04-11 10:48:42 -07004204 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004205 l2cap_send_disconn_req(chan, -err);
Mat Martineau3c588192012-04-11 10:48:42 -07004206 else
4207 l2cap_chan_ready(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004208 }
4209
4210done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004211 l2cap_chan_unlock(chan);
Mat Martineau3c588192012-04-11 10:48:42 -07004212 return err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004213}
4214
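/* Handle a Disconnection Request: acknowledge it with a Disconnection
 * Response, remove the channel and let its owner close it.
 */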
Gustavo Padovan2d792812012-10-06 10:07:01 +01004215static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004216 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4217 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004218{
4219 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
4220 struct l2cap_disconn_rsp rsp;
4221 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004222 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004223
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004224 if (cmd_len != sizeof(*req))
4225 return -EPROTO;
4226
Linus Torvalds1da177e2005-04-16 15:20:36 -07004227 scid = __le16_to_cpu(req->scid);
4228 dcid = __le16_to_cpu(req->dcid);
4229
4230 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
4231
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004232 mutex_lock(&conn->chan_lock);
4233
4234 chan = __l2cap_get_chan_by_scid(conn, dcid);
4235 if (!chan) {
4236 mutex_unlock(&conn->chan_lock);
Johan Hedberg662d6522013-10-16 11:20:47 +03004237 cmd_reject_invalid_cid(conn, cmd->ident, dcid, scid);
4238 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004239 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004240
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004241 l2cap_chan_lock(chan);
4242
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03004243 rsp.dcid = cpu_to_le16(chan->scid);
4244 rsp.scid = cpu_to_le16(chan->dcid);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004245 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
4246
Gustavo Padovan5ec1bbe2013-10-15 19:24:48 -03004247 chan->ops->set_shutdown(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004248
Mat Martineau61d6ef32012-04-27 16:50:50 -07004249 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004250 l2cap_chan_del(chan, ECONNRESET);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004251
4252 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253
Gustavo Padovan80b98022012-05-27 22:27:51 -03004254 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004255 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004256
4257 mutex_unlock(&conn->chan_lock);
4258
Linus Torvalds1da177e2005-04-16 15:20:36 -07004259 return 0;
4260}
4261
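/* Handle a Disconnection Response: the remote side has acknowledged our
 * disconnect, so remove and close the channel.
 */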
Gustavo Padovan2d792812012-10-06 10:07:01 +01004262static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004263 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4264 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004265{
4266 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4267 u16 dcid, scid;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004268 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004269
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004270 if (cmd_len != sizeof(*rsp))
4271 return -EPROTO;
4272
Linus Torvalds1da177e2005-04-16 15:20:36 -07004273 scid = __le16_to_cpu(rsp->scid);
4274 dcid = __le16_to_cpu(rsp->dcid);
4275
4276 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4277
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004278 mutex_lock(&conn->chan_lock);
4279
4280 chan = __l2cap_get_chan_by_scid(conn, scid);
4281 if (!chan) {
4282 mutex_unlock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004283 return 0;
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004284 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004285
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004286 l2cap_chan_lock(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004287
Mat Martineau61d6ef32012-04-27 16:50:50 -07004288 l2cap_chan_hold(chan);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004289 l2cap_chan_del(chan, 0);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02004290
4291 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004292
Gustavo Padovan80b98022012-05-27 22:27:51 -03004293 chan->ops->close(chan);
Mat Martineau61d6ef32012-04-27 16:50:50 -07004294 l2cap_chan_put(chan);
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02004295
4296 mutex_unlock(&conn->chan_lock);
4297
Linus Torvalds1da177e2005-04-16 15:20:36 -07004298 return 0;
4299}
4300
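/* Handle an Information Request: report the local feature mask or fixed
 * channel map, or L2CAP_IR_NOTSUPP for an unknown information type.
 */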
Gustavo Padovan2d792812012-10-06 10:07:01 +01004301static inline int l2cap_information_req(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004302 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4303 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304{
4305 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004306 u16 type;
4307
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004308 if (cmd_len != sizeof(*req))
4309 return -EPROTO;
4310
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311 type = __le16_to_cpu(req->type);
4312
4313 BT_DBG("type 0x%4.4x", type);
4314
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004315 if (type == L2CAP_IT_FEAT_MASK) {
4316 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004317 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004318 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004319 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4320 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004321 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004322 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Gustavo Padovan2d792812012-10-06 10:07:01 +01004323 | L2CAP_FEAT_FCS;
Marcel Holtmann848566b2013-10-01 22:59:22 -07004324 if (conn->hs_enabled)
Andrei Emeltchenko6327eb92011-10-11 13:37:42 +03004325 feat_mask |= L2CAP_FEAT_EXT_FLOW
Gustavo Padovan2d792812012-10-06 10:07:01 +01004326 | L2CAP_FEAT_EXT_WINDOW;
Andrei Emeltchenkoa5fd6f32011-09-16 16:26:32 +03004327
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004328 put_unaligned_le32(feat_mask, rsp->data);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004329 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4330 buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004331 } else if (type == L2CAP_IT_FIXED_CHAN) {
4332 u8 buf[12];
4333 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
Mat Martineau50a147c2011-11-02 16:18:34 -07004334
Marcel Holtmann848566b2013-10-01 22:59:22 -07004335 if (conn->hs_enabled)
Mat Martineau50a147c2011-11-02 16:18:34 -07004336 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
4337 else
4338 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
4339
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004340 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4341 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Andrei Emeltchenkoc6337ea2011-10-20 17:02:44 +03004342 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
Gustavo Padovan2d792812012-10-06 10:07:01 +01004343 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(buf),
4344 buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004345 } else {
4346 struct l2cap_info_rsp rsp;
4347 rsp.type = cpu_to_le16(type);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004348 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
Gustavo Padovan2d792812012-10-06 10:07:01 +01004349 l2cap_send_cmd(conn, cmd->ident, L2CAP_INFO_RSP, sizeof(rsp),
4350 &rsp);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004351 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004352
4353 return 0;
4354}
4355
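/* Handle an Information Response: store the remote feature mask or fixed
 * channel mask and start pending connections once the exchange is done.
 */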
Gustavo Padovan2d792812012-10-06 10:07:01 +01004356static inline int l2cap_information_rsp(struct l2cap_conn *conn,
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004357 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
4358 u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004359{
4360 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
4361 u16 type, result;
4362
Jaganath Kanakkassery3f6fa3d2013-06-21 19:55:11 +05304363 if (cmd_len < sizeof(*rsp))
Johan Hedbergcb3b3152013-05-28 13:46:30 +03004364 return -EPROTO;
4365
Linus Torvalds1da177e2005-04-16 15:20:36 -07004366 type = __le16_to_cpu(rsp->type);
4367 result = __le16_to_cpu(rsp->result);
4368
4369 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
4370
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004371 /* L2CAP Info req/rsp are not bound to any channel, so add extra checks */
4372 if (cmd->ident != conn->info_ident ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01004373 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
Andrei Emeltchenkoe90165b2011-03-25 11:31:41 +02004374 return 0;
4375
Ulisses Furquim17cd3f32012-01-30 18:26:28 -02004376 cancel_delayed_work(&conn->info_timer);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004377
Ville Tervoadb08ed2010-08-04 09:43:33 +03004378 if (result != L2CAP_IR_SUCCESS) {
4379 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4380 conn->info_ident = 0;
4381
4382 l2cap_conn_start(conn);
4383
4384 return 0;
4385 }
4386
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004387 switch (type) {
4388 case L2CAP_IT_FEAT_MASK:
Harvey Harrison83985312008-05-02 16:25:46 -07004389 conn->feat_mask = get_unaligned_le32(rsp->data);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004390
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07004391 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004392 struct l2cap_info_req req;
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004393 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004394
4395 conn->info_ident = l2cap_get_ident(conn);
4396
4397 l2cap_send_cmd(conn, conn->info_ident,
Gustavo Padovan2d792812012-10-06 10:07:01 +01004398 L2CAP_INFO_REQ, sizeof(req), &req);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004399 } else {
4400 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
4401 conn->info_ident = 0;
4402
4403 l2cap_conn_start(conn);
4404 }
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004405 break;
4406
4407 case L2CAP_IT_FIXED_CHAN:
4408 conn->fixed_chan_mask = rsp->data[0];
Marcel Holtmann984947d2009-02-06 23:35:19 +01004409 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004410 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004411
4412 l2cap_conn_start(conn);
Andrei Emeltchenko978c93b2012-02-29 10:41:41 +02004413 break;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004414 }
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004415
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416 return 0;
4417}
4418
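/* Handle an AMP Create Channel Request: fall back to the BR/EDR connect
 * path for controller id 0, otherwise validate the AMP controller and
 * bind the new channel to the high speed link.
 */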
Mat Martineau17009152012-10-23 15:24:07 -07004419static int l2cap_create_channel_req(struct l2cap_conn *conn,
4420 struct l2cap_cmd_hdr *cmd,
4421 u16 cmd_len, void *data)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004422{
4423 struct l2cap_create_chan_req *req = data;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004424 struct l2cap_create_chan_rsp rsp;
Mat Martineau17009152012-10-23 15:24:07 -07004425 struct l2cap_chan *chan;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004426 struct hci_dev *hdev;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004427 u16 psm, scid;
4428
4429 if (cmd_len != sizeof(*req))
4430 return -EPROTO;
4431
Marcel Holtmann848566b2013-10-01 22:59:22 -07004432 if (!conn->hs_enabled)
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004433 return -EINVAL;
4434
4435 psm = le16_to_cpu(req->psm);
4436 scid = le16_to_cpu(req->scid);
4437
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004438 BT_DBG("psm 0x%2.2x, scid 0x%4.4x, amp_id %d", psm, scid, req->amp_id);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004439
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004440 /* For controller id 0 make a BR/EDR connection */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004441 if (req->amp_id == AMP_ID_BREDR) {
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004442 l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4443 req->amp_id);
4444 return 0;
4445 }
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004446
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004447 /* Validate AMP controller id */
4448 hdev = hci_dev_get(req->amp_id);
4449 if (!hdev)
4450 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004451
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004452 if (hdev->dev_type != HCI_AMP || !test_bit(HCI_UP, &hdev->flags)) {
Mat Martineau17009152012-10-23 15:24:07 -07004453 hci_dev_put(hdev);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004454 goto error;
Mat Martineau17009152012-10-23 15:24:07 -07004455 }
4456
4457 chan = l2cap_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4458 req->amp_id);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004459 if (chan) {
4460 struct amp_mgr *mgr = conn->hcon->amp_mgr;
4461 struct hci_conn *hs_hcon;
4462
Marcel Holtmann98e0f7e2013-10-13 02:23:40 -07004463 hs_hcon = hci_conn_hash_lookup_ba(hdev, AMP_LINK,
4464 &conn->hcon->dst);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004465 if (!hs_hcon) {
4466 hci_dev_put(hdev);
Johan Hedberg662d6522013-10-16 11:20:47 +03004467 cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
4468 chan->dcid);
4469 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004470 }
4471
4472 BT_DBG("mgr %p bredr_chan %p hs_hcon %p", mgr, chan, hs_hcon);
4473
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004474 mgr->bredr_chan = chan;
4475 chan->hs_hcon = hs_hcon;
Andrei Emeltchenkofd45bf42012-11-20 17:16:22 +02004476 chan->fcs = L2CAP_FCS_NONE;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004477 conn->mtu = hdev->block_mtu;
4478 }
4479
4480 hci_dev_put(hdev);
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004481
4482 return 0;
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004483
4484error:
4485 rsp.dcid = 0;
4486 rsp.scid = cpu_to_le16(scid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004487 rsp.result = cpu_to_le16(L2CAP_CR_BAD_AMP);
4488 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Andrei Emeltchenko6e1df6a2012-11-01 15:37:02 +02004489
4490 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4491 sizeof(rsp), &rsp);
4492
Johan Hedbergdc280802013-09-16 13:05:13 +03004493 return 0;
Mat Martineauf94ff6f2011-11-02 16:18:32 -07004494}
4495
Mat Martineau8eb200b2012-10-23 15:24:17 -07004496static void l2cap_send_move_chan_req(struct l2cap_chan *chan, u8 dest_amp_id)
4497{
4498 struct l2cap_move_chan_req req;
4499 u8 ident;
4500
4501 BT_DBG("chan %p, dest_amp_id %d", chan, dest_amp_id);
4502
4503 ident = l2cap_get_ident(chan->conn);
4504 chan->ident = ident;
4505
4506 req.icid = cpu_to_le16(chan->scid);
4507 req.dest_amp_id = dest_amp_id;
4508
4509 l2cap_send_cmd(chan->conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req),
4510 &req);
4511
4512 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4513}
4514
Mat Martineau1500109b2012-10-23 15:24:15 -07004515static void l2cap_send_move_chan_rsp(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004516{
4517 struct l2cap_move_chan_rsp rsp;
4518
Mat Martineau1500109b2012-10-23 15:24:15 -07004519 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004520
Mat Martineau1500109b2012-10-23 15:24:15 -07004521 rsp.icid = cpu_to_le16(chan->dcid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004522 rsp.result = cpu_to_le16(result);
4523
Mat Martineau1500109b2012-10-23 15:24:15 -07004524 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_RSP,
4525 sizeof(rsp), &rsp);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004526}
4527
Mat Martineau5b155ef2012-10-23 15:24:14 -07004528static void l2cap_send_move_chan_cfm(struct l2cap_chan *chan, u16 result)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004529{
4530 struct l2cap_move_chan_cfm cfm;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004531
Mat Martineau5b155ef2012-10-23 15:24:14 -07004532 BT_DBG("chan %p, result 0x%4.4x", chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004533
Mat Martineau5b155ef2012-10-23 15:24:14 -07004534 chan->ident = l2cap_get_ident(chan->conn);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004535
Mat Martineau5b155ef2012-10-23 15:24:14 -07004536 cfm.icid = cpu_to_le16(chan->scid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004537 cfm.result = cpu_to_le16(result);
4538
Mat Martineau5b155ef2012-10-23 15:24:14 -07004539 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_MOVE_CHAN_CFM,
4540 sizeof(cfm), &cfm);
4541
4542 __set_chan_timer(chan, L2CAP_MOVE_TIMEOUT);
4543}
4544
4545static void l2cap_send_move_chan_cfm_icid(struct l2cap_conn *conn, u16 icid)
4546{
4547 struct l2cap_move_chan_cfm cfm;
4548
4549 BT_DBG("conn %p, icid 0x%4.4x", conn, icid);
4550
4551 cfm.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004552 cfm.result = cpu_to_le16(L2CAP_MC_UNCONFIRMED);
Mat Martineau5b155ef2012-10-23 15:24:14 -07004553
4554 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_MOVE_CHAN_CFM,
4555 sizeof(cfm), &cfm);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004556}
4557
4558static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004559 u16 icid)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004560{
4561 struct l2cap_move_chan_cfm_rsp rsp;
4562
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004563 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004564
4565 rsp.icid = cpu_to_le16(icid);
4566 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4567}
4568
Mat Martineau5f3847a2012-10-23 15:24:12 -07004569static void __release_logical_link(struct l2cap_chan *chan)
4570{
4571 chan->hs_hchan = NULL;
4572 chan->hs_hcon = NULL;
4573
4574 /* Placeholder - release the logical link */
4575}
4576
Mat Martineau1500109b2012-10-23 15:24:15 -07004577static void l2cap_logical_fail(struct l2cap_chan *chan)
4578{
4579 /* Logical link setup failed */
4580 if (chan->state != BT_CONNECTED) {
4581 /* Create channel failure, disconnect */
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004582 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineau1500109b2012-10-23 15:24:15 -07004583 return;
4584 }
4585
4586 switch (chan->move_role) {
4587 case L2CAP_MOVE_ROLE_RESPONDER:
4588 l2cap_move_done(chan);
4589 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_SUPP);
4590 break;
4591 case L2CAP_MOVE_ROLE_INITIATOR:
4592 if (chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_COMP ||
4593 chan->move_state == L2CAP_MOVE_WAIT_LOGICAL_CFM) {
4594 /* Remote has only sent pending or
4595 * success responses, clean up
4596 */
4597 l2cap_move_done(chan);
4598 }
4599
4600 /* Other amp move states imply that the move
4601 * has already aborted
4602 */
4603 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4604 break;
4605 }
4606}
4607
4608static void l2cap_logical_finish_create(struct l2cap_chan *chan,
4609 struct hci_chan *hchan)
4610{
4611 struct l2cap_conf_rsp rsp;
Mat Martineau1500109b2012-10-23 15:24:15 -07004612
Andrei Emeltchenko336178a2012-10-31 15:46:27 +02004613 chan->hs_hchan = hchan;
Mat Martineau1500109b2012-10-23 15:24:15 -07004614 chan->hs_hcon->l2cap_data = chan->conn;
4615
Andrei Emeltchenko35ba9562012-10-25 15:20:43 +03004616 l2cap_send_efs_conf_rsp(chan, &rsp, chan->ident, 0);
Mat Martineau1500109b2012-10-23 15:24:15 -07004617
4618 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
Andrei Emeltchenkofe79c6f2012-10-25 15:20:42 +03004619 int err;
Mat Martineau1500109b2012-10-23 15:24:15 -07004620
4621 set_default_fcs(chan);
4622
4623 err = l2cap_ertm_init(chan);
4624 if (err < 0)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02004625 l2cap_send_disconn_req(chan, -err);
Mat Martineau1500109b2012-10-23 15:24:15 -07004626 else
4627 l2cap_chan_ready(chan);
4628 }
4629}
4630
4631static void l2cap_logical_finish_move(struct l2cap_chan *chan,
4632 struct hci_chan *hchan)
4633{
4634 chan->hs_hcon = hchan->conn;
4635 chan->hs_hcon->l2cap_data = chan->conn;
4636
4637 BT_DBG("move_state %d", chan->move_state);
4638
4639 switch (chan->move_state) {
4640 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
4641 /* Move confirm will be sent after a success
4642 * response is received
4643 */
4644 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4645 break;
4646 case L2CAP_MOVE_WAIT_LOGICAL_CFM:
4647 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4648 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4649 } else if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
4650 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4651 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4652 } else if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4653 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4654 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4655 }
4656 break;
4657 default:
4658 /* Move was not in expected state, free the channel */
4659 __release_logical_link(chan);
4660
4661 chan->move_state = L2CAP_MOVE_STABLE;
4662 }
4663}
4664
4665/* Call with chan locked */
Andrei Emeltchenko27695fb2012-10-25 15:20:45 +03004666void l2cap_logical_cfm(struct l2cap_chan *chan, struct hci_chan *hchan,
4667 u8 status)
Mat Martineau5b155ef2012-10-23 15:24:14 -07004668{
Mat Martineau1500109b2012-10-23 15:24:15 -07004669 BT_DBG("chan %p, hchan %p, status %d", chan, hchan, status);
4670
4671 if (status) {
4672 l2cap_logical_fail(chan);
4673 __release_logical_link(chan);
4674 return;
4675 }
4676
4677 if (chan->state != BT_CONNECTED) {
4678 /* Ignore logical link if channel is on BR/EDR */
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004679 if (chan->local_amp_id != AMP_ID_BREDR)
Mat Martineau1500109b2012-10-23 15:24:15 -07004680 l2cap_logical_finish_create(chan, hchan);
4681 } else {
4682 l2cap_logical_finish_move(chan, hchan);
4683 }
Mat Martineau5b155ef2012-10-23 15:24:14 -07004684}
4685
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004686void l2cap_move_start(struct l2cap_chan *chan)
4687{
4688 BT_DBG("chan %p", chan);
4689
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004690 if (chan->local_amp_id == AMP_ID_BREDR) {
Mat Martineau3f7a56c2012-10-23 15:24:23 -07004691 if (chan->chan_policy != BT_CHANNEL_POLICY_AMP_PREFERRED)
4692 return;
4693 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4694 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4695 /* Placeholder - start physical link setup */
4696 } else {
4697 chan->move_role = L2CAP_MOVE_ROLE_INITIATOR;
4698 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
4699 chan->move_id = 0;
4700 l2cap_move_setup(chan);
4701 l2cap_send_move_chan_req(chan, 0);
4702 }
4703}
4704
Mat Martineau8eb200b2012-10-23 15:24:17 -07004705static void l2cap_do_create(struct l2cap_chan *chan, int result,
4706 u8 local_amp_id, u8 remote_amp_id)
4707{
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004708 BT_DBG("chan %p state %s %u -> %u", chan, state_to_string(chan->state),
4709 local_amp_id, remote_amp_id);
4710
Andrei Emeltchenko12d6cc62012-11-14 17:39:31 +02004711 chan->fcs = L2CAP_FCS_NONE;
4712
Andrei Emeltchenko62748ca2012-11-20 17:16:19 +02004713 /* Outgoing channel on AMP */
4714 if (chan->state == BT_CONNECT) {
4715 if (result == L2CAP_CR_SUCCESS) {
4716 chan->local_amp_id = local_amp_id;
4717 l2cap_send_create_chan_req(chan, remote_amp_id);
4718 } else {
4719 /* Revert to BR/EDR connect */
4720 l2cap_send_conn_req(chan);
4721 }
4722
4723 return;
4724 }
4725
4726 /* Incoming channel on AMP */
4727 if (__l2cap_no_conn_pending(chan)) {
Mat Martineau8eb200b2012-10-23 15:24:17 -07004728 struct l2cap_conn_rsp rsp;
4729 char buf[128];
4730 rsp.scid = cpu_to_le16(chan->dcid);
4731 rsp.dcid = cpu_to_le16(chan->scid);
4732
Mat Martineau8eb200b2012-10-23 15:24:17 -07004733 if (result == L2CAP_CR_SUCCESS) {
4734 /* Send successful response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004735 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
4736 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004737 } else {
4738 /* Send negative response */
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004739 rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
4740 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004741 }
4742
4743 l2cap_send_cmd(chan->conn, chan->ident, L2CAP_CREATE_CHAN_RSP,
4744 sizeof(rsp), &rsp);
4745
4746 if (result == L2CAP_CR_SUCCESS) {
Gustavo Padovanf93fa272013-10-21 14:21:40 -02004747 l2cap_state_change(chan, BT_CONFIG);
Mat Martineau8eb200b2012-10-23 15:24:17 -07004748 set_bit(CONF_REQ_SENT, &chan->conf_state);
4749 l2cap_send_cmd(chan->conn, l2cap_get_ident(chan->conn),
4750 L2CAP_CONF_REQ,
4751 l2cap_build_conf_req(chan, buf), buf);
4752 chan->num_conf_req++;
4753 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004754 }
4755}
4756
4757static void l2cap_do_move_initiate(struct l2cap_chan *chan, u8 local_amp_id,
4758 u8 remote_amp_id)
4759{
4760 l2cap_move_setup(chan);
4761 chan->move_id = local_amp_id;
4762 chan->move_state = L2CAP_MOVE_WAIT_RSP;
4763
4764 l2cap_send_move_chan_req(chan, remote_amp_id);
4765}
4766
4767static void l2cap_do_move_respond(struct l2cap_chan *chan, int result)
4768{
4769 struct hci_chan *hchan = NULL;
4770
4771 /* Placeholder - get hci_chan for logical link */
4772
4773 if (hchan) {
4774 if (hchan->state == BT_CONNECTED) {
4775 /* Logical link is ready to go */
4776 chan->hs_hcon = hchan->conn;
4777 chan->hs_hcon->l2cap_data = chan->conn;
4778 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4779 l2cap_send_move_chan_rsp(chan, L2CAP_MR_SUCCESS);
4780
4781 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
4782 } else {
4783 /* Wait for logical link to be ready */
4784 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4785 }
4786 } else {
4787 /* Logical link not available */
4788 l2cap_send_move_chan_rsp(chan, L2CAP_MR_NOT_ALLOWED);
4789 }
4790}
4791
4792static void l2cap_do_move_cancel(struct l2cap_chan *chan, int result)
4793{
4794 if (chan->move_role == L2CAP_MOVE_ROLE_RESPONDER) {
4795 u8 rsp_result;
4796 if (result == -EINVAL)
4797 rsp_result = L2CAP_MR_BAD_ID;
4798 else
4799 rsp_result = L2CAP_MR_NOT_ALLOWED;
4800
4801 l2cap_send_move_chan_rsp(chan, rsp_result);
4802 }
4803
4804 chan->move_role = L2CAP_MOVE_ROLE_NONE;
4805 chan->move_state = L2CAP_MOVE_STABLE;
4806
4807 /* Restart data transmission */
4808 l2cap_ertm_send(chan);
4809}
4810
Andrei Emeltchenkoa514b172012-11-14 17:39:30 +02004811/* Invoke with locked chan */
4812void __l2cap_physical_cfm(struct l2cap_chan *chan, int result)
Mat Martineau8eb200b2012-10-23 15:24:17 -07004813{
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004814 u8 local_amp_id = chan->local_amp_id;
Andrei Emeltchenkofffadc02012-11-01 15:37:03 +02004815 u8 remote_amp_id = chan->remote_amp_id;
Andrei Emeltchenko770bfef2012-10-31 15:46:29 +02004816
Mat Martineau8eb200b2012-10-23 15:24:17 -07004817 BT_DBG("chan %p, result %d, local_amp_id %d, remote_amp_id %d",
4818 chan, result, local_amp_id, remote_amp_id);
4819
Mat Martineau8eb200b2012-10-23 15:24:17 -07004820 if (chan->state == BT_DISCONN || chan->state == BT_CLOSED) {
4821 l2cap_chan_unlock(chan);
4822 return;
4823 }
4824
4825 if (chan->state != BT_CONNECTED) {
4826 l2cap_do_create(chan, result, local_amp_id, remote_amp_id);
4827 } else if (result != L2CAP_MR_SUCCESS) {
4828 l2cap_do_move_cancel(chan, result);
4829 } else {
4830 switch (chan->move_role) {
4831 case L2CAP_MOVE_ROLE_INITIATOR:
4832 l2cap_do_move_initiate(chan, local_amp_id,
4833 remote_amp_id);
4834 break;
4835 case L2CAP_MOVE_ROLE_RESPONDER:
4836 l2cap_do_move_respond(chan, result);
4837 break;
4838 default:
4839 l2cap_do_move_cancel(chan, result);
4840 break;
4841 }
4842 }
Mat Martineau8eb200b2012-10-23 15:24:17 -07004843}
4844
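/* Handle a Move Channel Request: validate the requested destination,
 * detect move collisions and reply with a Move Channel Response carrying
 * the result.
 */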
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004845static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004846 struct l2cap_cmd_hdr *cmd,
4847 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004848{
4849 struct l2cap_move_chan_req *req = data;
Mat Martineau1500109b2012-10-23 15:24:15 -07004850 struct l2cap_move_chan_rsp rsp;
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004851 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004852 u16 icid = 0;
4853 u16 result = L2CAP_MR_NOT_ALLOWED;
4854
4855 if (cmd_len != sizeof(*req))
4856 return -EPROTO;
4857
4858 icid = le16_to_cpu(req->icid);
4859
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03004860 BT_DBG("icid 0x%4.4x, dest_amp_id %d", icid, req->dest_amp_id);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004861
Marcel Holtmann848566b2013-10-01 22:59:22 -07004862 if (!conn->hs_enabled)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004863 return -EINVAL;
4864
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004865 chan = l2cap_get_chan_by_dcid(conn, icid);
4866 if (!chan) {
Mat Martineau1500109b2012-10-23 15:24:15 -07004867 rsp.icid = cpu_to_le16(icid);
Joe Perchesdcf4adb2014-03-12 10:52:35 -07004868 rsp.result = cpu_to_le16(L2CAP_MR_NOT_ALLOWED);
Mat Martineau1500109b2012-10-23 15:24:15 -07004869 l2cap_send_cmd(conn, cmd->ident, L2CAP_MOVE_CHAN_RSP,
4870 sizeof(rsp), &rsp);
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004871 return 0;
4872 }
4873
Mat Martineau1500109b2012-10-23 15:24:15 -07004874 chan->ident = cmd->ident;
4875
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004876 if (chan->scid < L2CAP_CID_DYN_START ||
4877 chan->chan_policy == BT_CHANNEL_POLICY_BREDR_ONLY ||
4878 (chan->mode != L2CAP_MODE_ERTM &&
4879 chan->mode != L2CAP_MODE_STREAMING)) {
4880 result = L2CAP_MR_NOT_ALLOWED;
4881 goto send_move_response;
4882 }
4883
4884 if (chan->local_amp_id == req->dest_amp_id) {
4885 result = L2CAP_MR_SAME_ID;
4886 goto send_move_response;
4887 }
4888
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004889 if (req->dest_amp_id != AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004890 struct hci_dev *hdev;
4891 hdev = hci_dev_get(req->dest_amp_id);
4892 if (!hdev || hdev->dev_type != HCI_AMP ||
4893 !test_bit(HCI_UP, &hdev->flags)) {
4894 if (hdev)
4895 hci_dev_put(hdev);
4896
4897 result = L2CAP_MR_BAD_ID;
4898 goto send_move_response;
4899 }
4900 hci_dev_put(hdev);
4901 }
4902
4903 /* Detect a move collision. Only send a collision response
4904 * if this side has "lost", otherwise proceed with the move.
4905 * The winner has the larger bd_addr.
4906 */
4907 if ((__chan_is_moving(chan) ||
4908 chan->move_role != L2CAP_MOVE_ROLE_NONE) &&
Marcel Holtmann6f59b902013-10-13 05:24:01 -07004909 bacmp(&conn->hcon->src, &conn->hcon->dst) > 0) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004910 result = L2CAP_MR_COLLISION;
4911 goto send_move_response;
4912 }
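	/* Worked example (illustrative only): if the local controller
	 * address is 00:1B:DC:00:00:02 and the remote address is
	 * 00:1B:DC:00:00:01, bacmp(src, dst) > 0, so the peer's request
	 * is rejected with L2CAP_MR_COLLISION and the local move
	 * proceeds; with the addresses swapped, the peer's request is
	 * processed instead.
	 */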
4913
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004914 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
4915 l2cap_move_setup(chan);
4916 chan->move_id = req->dest_amp_id;
4917 icid = chan->dcid;
4918
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07004919 if (req->dest_amp_id == AMP_ID_BREDR) {
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004920 /* Moving to BR/EDR */
4921 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
4922 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4923 result = L2CAP_MR_PEND;
4924 } else {
4925 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM;
4926 result = L2CAP_MR_SUCCESS;
4927 }
4928 } else {
4929 chan->move_state = L2CAP_MOVE_WAIT_PREPARE;
4930 /* Placeholder - uncomment when amp functions are available */
4931 /*amp_accept_physical(chan, req->dest_amp_id);*/
4932 result = L2CAP_MR_PEND;
4933 }
4934
4935send_move_response:
Mat Martineau1500109b2012-10-23 15:24:15 -07004936 l2cap_send_move_chan_rsp(chan, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004937
Mat Martineau02b0fbb2012-10-23 15:24:10 -07004938 l2cap_chan_unlock(chan);
4939
Mat Martineau8d5a04a2011-11-02 16:18:35 -07004940 return 0;
4941}
4942
Mat Martineau5b155ef2012-10-23 15:24:14 -07004943static void l2cap_move_continue(struct l2cap_conn *conn, u16 icid, u16 result)
4944{
4945 struct l2cap_chan *chan;
4946 struct hci_chan *hchan = NULL;
4947
4948 chan = l2cap_get_chan_by_scid(conn, icid);
4949 if (!chan) {
4950 l2cap_send_move_chan_cfm_icid(conn, icid);
4951 return;
4952 }
4953
4954 __clear_chan_timer(chan);
4955 if (result == L2CAP_MR_PEND)
4956 __set_chan_timer(chan, L2CAP_MOVE_ERTX_TIMEOUT);
4957
4958 switch (chan->move_state) {
4959 case L2CAP_MOVE_WAIT_LOGICAL_COMP:
 4960		/* Move confirm will be sent when the logical link
 4961		 * is complete.
4962 */
4963 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4964 break;
4965 case L2CAP_MOVE_WAIT_RSP_SUCCESS:
4966 if (result == L2CAP_MR_PEND) {
4967 break;
4968 } else if (test_bit(CONN_LOCAL_BUSY,
4969 &chan->conn_state)) {
4970 chan->move_state = L2CAP_MOVE_WAIT_LOCAL_BUSY;
4971 } else {
4972 /* Logical link is up or moving to BR/EDR,
4973 * proceed with move
4974 */
4975 chan->move_state = L2CAP_MOVE_WAIT_CONFIRM_RSP;
4976 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
4977 }
4978 break;
4979 case L2CAP_MOVE_WAIT_RSP:
4980 /* Moving to AMP */
4981 if (result == L2CAP_MR_SUCCESS) {
 4982			/* Remote is ready; send confirm immediately
 4983			 * after the logical link is ready
4984 */
4985 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_CFM;
4986 } else {
4987 /* Both logical link and move success
4988 * are required to confirm
4989 */
4990 chan->move_state = L2CAP_MOVE_WAIT_LOGICAL_COMP;
4991 }
4992
4993 /* Placeholder - get hci_chan for logical link */
4994 if (!hchan) {
4995 /* Logical link not available */
4996 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
4997 break;
4998 }
4999
5000 /* If the logical link is not yet connected, do not
5001 * send confirmation.
5002 */
5003 if (hchan->state != BT_CONNECTED)
5004 break;
5005
5006 /* Logical link is already ready to go */
5007
5008 chan->hs_hcon = hchan->conn;
5009 chan->hs_hcon->l2cap_data = chan->conn;
5010
5011 if (result == L2CAP_MR_SUCCESS) {
5012 /* Can confirm now */
5013 l2cap_send_move_chan_cfm(chan, L2CAP_MC_CONFIRMED);
5014 } else {
5015 /* Now only need move success
5016 * to confirm
5017 */
5018 chan->move_state = L2CAP_MOVE_WAIT_RSP_SUCCESS;
5019 }
5020
5021 l2cap_logical_cfm(chan, hchan, L2CAP_MR_SUCCESS);
5022 break;
5023 default:
5024 /* Any other amp move state means the move failed. */
5025 chan->move_id = chan->local_amp_id;
5026 l2cap_move_done(chan);
5027 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5028 }
5029
5030 l2cap_chan_unlock(chan);
5031}
5032
5033static void l2cap_move_fail(struct l2cap_conn *conn, u8 ident, u16 icid,
5034 u16 result)
5035{
5036 struct l2cap_chan *chan;
5037
5038 chan = l2cap_get_chan_by_ident(conn, ident);
5039 if (!chan) {
5040 /* Could not locate channel, icid is best guess */
5041 l2cap_send_move_chan_cfm_icid(conn, icid);
5042 return;
5043 }
5044
5045 __clear_chan_timer(chan);
5046
5047 if (chan->move_role == L2CAP_MOVE_ROLE_INITIATOR) {
5048 if (result == L2CAP_MR_COLLISION) {
5049 chan->move_role = L2CAP_MOVE_ROLE_RESPONDER;
5050 } else {
5051 /* Cleanup - cancel move */
5052 chan->move_id = chan->local_amp_id;
5053 l2cap_move_done(chan);
5054 }
5055 }
5056
5057 l2cap_send_move_chan_cfm(chan, L2CAP_MC_UNCONFIRMED);
5058
5059 l2cap_chan_unlock(chan);
5060}
5061
5062static int l2cap_move_channel_rsp(struct l2cap_conn *conn,
5063 struct l2cap_cmd_hdr *cmd,
5064 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005065{
5066 struct l2cap_move_chan_rsp *rsp = data;
5067 u16 icid, result;
5068
5069 if (cmd_len != sizeof(*rsp))
5070 return -EPROTO;
5071
5072 icid = le16_to_cpu(rsp->icid);
5073 result = le16_to_cpu(rsp->result);
5074
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005075 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005076
Mat Martineau5b155ef2012-10-23 15:24:14 -07005077 if (result == L2CAP_MR_SUCCESS || result == L2CAP_MR_PEND)
5078 l2cap_move_continue(conn, icid, result);
5079 else
5080 l2cap_move_fail(conn, cmd->ident, icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005081
5082 return 0;
5083}
5084
Mat Martineau5f3847a2012-10-23 15:24:12 -07005085static int l2cap_move_channel_confirm(struct l2cap_conn *conn,
5086 struct l2cap_cmd_hdr *cmd,
5087 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005088{
5089 struct l2cap_move_chan_cfm *cfm = data;
Mat Martineau5f3847a2012-10-23 15:24:12 -07005090 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005091 u16 icid, result;
5092
5093 if (cmd_len != sizeof(*cfm))
5094 return -EPROTO;
5095
5096 icid = le16_to_cpu(cfm->icid);
5097 result = le16_to_cpu(cfm->result);
5098
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005099 BT_DBG("icid 0x%4.4x, result 0x%4.4x", icid, result);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005100
Mat Martineau5f3847a2012-10-23 15:24:12 -07005101 chan = l2cap_get_chan_by_dcid(conn, icid);
5102 if (!chan) {
5103 /* Spec requires a response even if the icid was not found */
5104 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5105 return 0;
5106 }
5107
5108 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM) {
5109 if (result == L2CAP_MC_CONFIRMED) {
5110 chan->local_amp_id = chan->move_id;
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005111 if (chan->local_amp_id == AMP_ID_BREDR)
Mat Martineau5f3847a2012-10-23 15:24:12 -07005112 __release_logical_link(chan);
5113 } else {
5114 chan->move_id = chan->local_amp_id;
5115 }
5116
5117 l2cap_move_done(chan);
5118 }
5119
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005120 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
5121
Mat Martineau5f3847a2012-10-23 15:24:12 -07005122 l2cap_chan_unlock(chan);
5123
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005124 return 0;
5125}
5126
5127static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005128 struct l2cap_cmd_hdr *cmd,
5129 u16 cmd_len, void *data)
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005130{
5131 struct l2cap_move_chan_cfm_rsp *rsp = data;
Mat Martineau3fd71a02012-10-23 15:24:16 -07005132 struct l2cap_chan *chan;
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005133 u16 icid;
5134
5135 if (cmd_len != sizeof(*rsp))
5136 return -EPROTO;
5137
5138 icid = le16_to_cpu(rsp->icid);
5139
Andrei Emeltchenkoad0ac6c2012-07-10 15:27:50 +03005140 BT_DBG("icid 0x%4.4x", icid);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005141
Mat Martineau3fd71a02012-10-23 15:24:16 -07005142 chan = l2cap_get_chan_by_scid(conn, icid);
5143 if (!chan)
5144 return 0;
5145
5146 __clear_chan_timer(chan);
5147
5148 if (chan->move_state == L2CAP_MOVE_WAIT_CONFIRM_RSP) {
5149 chan->local_amp_id = chan->move_id;
5150
Marcel Holtmann6ed971c2013-10-05 11:47:44 -07005151 if (chan->local_amp_id == AMP_ID_BREDR && chan->hs_hchan)
Mat Martineau3fd71a02012-10-23 15:24:16 -07005152 __release_logical_link(chan);
5153
5154 l2cap_move_done(chan);
5155 }
5156
5157 l2cap_chan_unlock(chan);
5158
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005159 return 0;
5160}
5161
Claudio Takahaside731152011-02-11 19:28:55 -02005162static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005163 struct l2cap_cmd_hdr *cmd,
Johan Hedberg203e6392013-05-15 10:07:15 +03005164 u16 cmd_len, u8 *data)
Claudio Takahaside731152011-02-11 19:28:55 -02005165{
5166 struct hci_conn *hcon = conn->hcon;
5167 struct l2cap_conn_param_update_req *req;
5168 struct l2cap_conn_param_update_rsp rsp;
Johan Hedberg203e6392013-05-15 10:07:15 +03005169 u16 min, max, latency, to_multiplier;
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005170 int err;
Claudio Takahaside731152011-02-11 19:28:55 -02005171
Johan Hedberg40bef302014-07-16 11:42:27 +03005172 if (hcon->role != HCI_ROLE_MASTER)
Claudio Takahaside731152011-02-11 19:28:55 -02005173 return -EINVAL;
5174
Claudio Takahaside731152011-02-11 19:28:55 -02005175 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5176 return -EPROTO;
5177
5178 req = (struct l2cap_conn_param_update_req *) data;
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005179 min = __le16_to_cpu(req->min);
5180 max = __le16_to_cpu(req->max);
Claudio Takahaside731152011-02-11 19:28:55 -02005181 latency = __le16_to_cpu(req->latency);
5182 to_multiplier = __le16_to_cpu(req->to_multiplier);
5183
 5184	BT_DBG("min 0x%4.4x max 0x%4.4x latency 0x%4.4x timeout 0x%4.4x",
Gustavo Padovan2d792812012-10-06 10:07:01 +01005185 min, max, latency, to_multiplier);
Claudio Takahaside731152011-02-11 19:28:55 -02005186
5187 memset(&rsp, 0, sizeof(rsp));
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005188
Andre Guedesd4905f22014-06-25 21:52:52 -03005189 err = hci_check_conn_params(min, max, latency, to_multiplier);
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005190 if (err)
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005191 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005192 else
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005193 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
Claudio Takahaside731152011-02-11 19:28:55 -02005194
5195 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005196 sizeof(rsp), &rsp);
Claudio Takahaside731152011-02-11 19:28:55 -02005197
Andre Guedesffb5a8272014-07-01 18:10:11 -03005198 if (!err) {
Johan Hedbergf4869e22014-07-02 17:37:32 +03005199 u8 store_hint;
Andre Guedesffb5a8272014-07-01 18:10:11 -03005200
Johan Hedbergf4869e22014-07-02 17:37:32 +03005201 store_hint = hci_le_conn_update(hcon, min, max, latency,
5202 to_multiplier);
5203 mgmt_new_conn_param(hcon->hdev, &hcon->dst, hcon->dst_type,
5204 store_hint, min, max, latency,
5205 to_multiplier);
Andre Guedesffb5a8272014-07-01 18:10:11 -03005207 }
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005208
Claudio Takahaside731152011-02-11 19:28:55 -02005209 return 0;
5210}
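/* Reference note (units per the Core spec; illustrative): min and max are
 * connection-interval values in 1.25 ms steps, latency is the number of
 * connection events the peripheral may skip, and to_multiplier is the
 * supervision timeout in 10 ms steps. For example, min = 0x0006 and
 * max = 0x0C80 correspond to 7.5 ms and 4 s, and to_multiplier = 0x0064
 * corresponds to a 1 s supervision timeout.
 */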
5211
Johan Hedbergf1496de2013-05-13 14:15:56 +03005212static int l2cap_le_connect_rsp(struct l2cap_conn *conn,
5213 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5214 u8 *data)
5215{
5216 struct l2cap_le_conn_rsp *rsp = (struct l2cap_le_conn_rsp *) data;
5217 u16 dcid, mtu, mps, credits, result;
5218 struct l2cap_chan *chan;
5219 int err;
5220
5221 if (cmd_len < sizeof(*rsp))
5222 return -EPROTO;
5223
5224 dcid = __le16_to_cpu(rsp->dcid);
5225 mtu = __le16_to_cpu(rsp->mtu);
5226 mps = __le16_to_cpu(rsp->mps);
5227 credits = __le16_to_cpu(rsp->credits);
5228 result = __le16_to_cpu(rsp->result);
5229
5230 if (result == L2CAP_CR_SUCCESS && (mtu < 23 || mps < 23))
5231 return -EPROTO;
5232
5233 BT_DBG("dcid 0x%4.4x mtu %u mps %u credits %u result 0x%2.2x",
5234 dcid, mtu, mps, credits, result);
5235
5236 mutex_lock(&conn->chan_lock);
5237
5238 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5239 if (!chan) {
5240 err = -EBADSLT;
5241 goto unlock;
5242 }
5243
5244 err = 0;
5245
5246 l2cap_chan_lock(chan);
5247
5248 switch (result) {
5249 case L2CAP_CR_SUCCESS:
5250 chan->ident = 0;
5251 chan->dcid = dcid;
5252 chan->omtu = mtu;
5253 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005254 chan->tx_credits = credits;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005255 l2cap_chan_ready(chan);
5256 break;
5257
5258 default:
5259 l2cap_chan_del(chan, ECONNREFUSED);
5260 break;
5261 }
5262
5263 l2cap_chan_unlock(chan);
5264
5265unlock:
5266 mutex_unlock(&conn->chan_lock);
5267
5268 return err;
5269}
5270
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005271static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005272 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5273 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005274{
5275 int err = 0;
5276
5277 switch (cmd->code) {
5278 case L2CAP_COMMAND_REJ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005279 l2cap_command_rej(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005280 break;
5281
5282 case L2CAP_CONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005283 err = l2cap_connect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005284 break;
5285
5286 case L2CAP_CONN_RSP:
Mat Martineauf5a25982012-10-11 17:48:21 +03005287 case L2CAP_CREATE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005288 l2cap_connect_create_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005289 break;
5290
5291 case L2CAP_CONF_REQ:
5292 err = l2cap_config_req(conn, cmd, cmd_len, data);
5293 break;
5294
5295 case L2CAP_CONF_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005296 l2cap_config_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005297 break;
5298
5299 case L2CAP_DISCONN_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005300 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005301 break;
5302
5303 case L2CAP_DISCONN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005304 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005305 break;
5306
5307 case L2CAP_ECHO_REQ:
5308 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5309 break;
5310
5311 case L2CAP_ECHO_RSP:
5312 break;
5313
5314 case L2CAP_INFO_REQ:
Johan Hedbergcb3b3152013-05-28 13:46:30 +03005315 err = l2cap_information_req(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005316 break;
5317
5318 case L2CAP_INFO_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005319 l2cap_information_rsp(conn, cmd, cmd_len, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005320 break;
5321
Mat Martineauf94ff6f2011-11-02 16:18:32 -07005322 case L2CAP_CREATE_CHAN_REQ:
5323 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
5324 break;
5325
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005326 case L2CAP_MOVE_CHAN_REQ:
5327 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
5328 break;
5329
5330 case L2CAP_MOVE_CHAN_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005331 l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005332 break;
5333
5334 case L2CAP_MOVE_CHAN_CFM:
5335 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
5336 break;
5337
5338 case L2CAP_MOVE_CHAN_CFM_RSP:
Johan Hedberg9245e732013-09-16 13:05:17 +03005339 l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
Mat Martineau8d5a04a2011-11-02 16:18:35 -07005340 break;
5341
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005342 default:
5343 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5344 err = -EINVAL;
5345 break;
5346 }
5347
5348 return err;
5349}
5350
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005351static int l2cap_le_connect_req(struct l2cap_conn *conn,
5352 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5353 u8 *data)
5354{
5355 struct l2cap_le_conn_req *req = (struct l2cap_le_conn_req *) data;
5356 struct l2cap_le_conn_rsp rsp;
5357 struct l2cap_chan *chan, *pchan;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005358 u16 dcid, scid, credits, mtu, mps;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005359 __le16 psm;
5360 u8 result;
5361
5362 if (cmd_len != sizeof(*req))
5363 return -EPROTO;
5364
5365 scid = __le16_to_cpu(req->scid);
5366 mtu = __le16_to_cpu(req->mtu);
5367 mps = __le16_to_cpu(req->mps);
5368 psm = req->psm;
5369 dcid = 0;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005370 credits = 0;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005371
5372 if (mtu < 23 || mps < 23)
5373 return -EPROTO;
5374
5375 BT_DBG("psm 0x%2.2x scid 0x%4.4x mtu %u mps %u", __le16_to_cpu(psm),
5376 scid, mtu, mps);
5377
 5378	/* Check if we have a socket listening on this psm */
5379 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, &conn->hcon->src,
5380 &conn->hcon->dst, LE_LINK);
5381 if (!pchan) {
5382 result = L2CAP_CR_BAD_PSM;
5383 chan = NULL;
5384 goto response;
5385 }
5386
5387 mutex_lock(&conn->chan_lock);
5388 l2cap_chan_lock(pchan);
5389
5390 if (!smp_sufficient_security(conn->hcon, pchan->sec_level)) {
5391 result = L2CAP_CR_AUTHENTICATION;
5392 chan = NULL;
5393 goto response_unlock;
5394 }
5395
 5396	/* Check if we already have a channel with that dcid */
5397 if (__l2cap_get_chan_by_dcid(conn, scid)) {
5398 result = L2CAP_CR_NO_MEM;
5399 chan = NULL;
5400 goto response_unlock;
5401 }
5402
5403 chan = pchan->ops->new_connection(pchan);
5404 if (!chan) {
5405 result = L2CAP_CR_NO_MEM;
5406 goto response_unlock;
5407 }
5408
Johan Hedberg0ce43ce2013-12-05 14:55:33 +02005409 l2cap_le_flowctl_init(chan);
5410
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005411 bacpy(&chan->src, &conn->hcon->src);
5412 bacpy(&chan->dst, &conn->hcon->dst);
5413 chan->src_type = bdaddr_type(conn->hcon, conn->hcon->src_type);
5414 chan->dst_type = bdaddr_type(conn->hcon, conn->hcon->dst_type);
5415 chan->psm = psm;
5416 chan->dcid = scid;
5417 chan->omtu = mtu;
5418 chan->remote_mps = mps;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005419 chan->tx_credits = __le16_to_cpu(req->credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005420
5421 __l2cap_chan_add(conn, chan);
5422 dcid = chan->scid;
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005423 credits = chan->rx_credits;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005424
5425 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
5426
5427 chan->ident = cmd->ident;
5428
5429 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
5430 l2cap_state_change(chan, BT_CONNECT2);
5431 result = L2CAP_CR_PEND;
5432 chan->ops->defer(chan);
5433 } else {
5434 l2cap_chan_ready(chan);
5435 result = L2CAP_CR_SUCCESS;
5436 }
5437
5438response_unlock:
5439 l2cap_chan_unlock(pchan);
5440 mutex_unlock(&conn->chan_lock);
Johan Hedberga24cce12014-08-07 22:56:42 +03005441 l2cap_chan_put(pchan);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005442
5443 if (result == L2CAP_CR_PEND)
5444 return 0;
5445
5446response:
5447 if (chan) {
5448 rsp.mtu = cpu_to_le16(chan->imtu);
Johan Hedberg3916aed2013-10-07 15:35:26 +02005449 rsp.mps = cpu_to_le16(chan->mps);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005450 } else {
5451 rsp.mtu = 0;
5452 rsp.mps = 0;
5453 }
5454
5455 rsp.dcid = cpu_to_le16(dcid);
Johan Hedberg0cd75f72013-05-17 13:09:05 +03005456 rsp.credits = cpu_to_le16(credits);
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005457 rsp.result = cpu_to_le16(result);
5458
5459 l2cap_send_cmd(conn, cmd->ident, L2CAP_LE_CONN_RSP, sizeof(rsp), &rsp);
5460
5461 return 0;
5462}
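/* Summary note (derived from the handler above; illustrative): an LE
 * Credit Based Connection Request carries psm, scid, mtu, mps and an
 * initial credit count. Values of mtu or mps below 23, the minimum
 * allowed for LE, are rejected. The peer's parameters are mirrored into
 * chan->omtu, chan->remote_mps and chan->tx_credits, while the response
 * advertises the local imtu, mps and rx_credits back to the peer.
 */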
5463
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005464static inline int l2cap_le_credits(struct l2cap_conn *conn,
5465 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5466 u8 *data)
5467{
5468 struct l2cap_le_credits *pkt;
5469 struct l2cap_chan *chan;
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005470 u16 cid, credits, max_credits;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005471
5472 if (cmd_len != sizeof(*pkt))
5473 return -EPROTO;
5474
5475 pkt = (struct l2cap_le_credits *) data;
5476 cid = __le16_to_cpu(pkt->cid);
5477 credits = __le16_to_cpu(pkt->credits);
5478
5479 BT_DBG("cid 0x%4.4x credits 0x%4.4x", cid, credits);
5480
5481 chan = l2cap_get_chan_by_dcid(conn, cid);
5482 if (!chan)
5483 return -EBADSLT;
5484
Johan Hedberg0f1bfe42014-01-27 15:11:35 -08005485 max_credits = LE_FLOWCTL_MAX_CREDITS - chan->tx_credits;
5486 if (credits > max_credits) {
5487 BT_ERR("LE credits overflow");
5488 l2cap_send_disconn_req(chan, ECONNRESET);
5489
5490 /* Return 0 so that we don't trigger an unnecessary
5491 * command reject packet.
5492 */
5493 return 0;
5494 }
5495
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005496 chan->tx_credits += credits;
5497
5498 while (chan->tx_credits && !skb_queue_empty(&chan->tx_q)) {
5499 l2cap_do_send(chan, skb_dequeue(&chan->tx_q));
5500 chan->tx_credits--;
5501 }
5502
5503 if (chan->tx_credits)
5504 chan->ops->resume(chan);
5505
5506 l2cap_chan_unlock(chan);
5507
5508 return 0;
5509}
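/* Worked example (illustrative only): with LE_FLOWCTL_MAX_CREDITS = 65535,
 * a channel that already holds tx_credits = 65000 may accept at most 535
 * additional credits. An LE Flow Control Credit packet granting 1000
 * credits would overflow the counter, so it is treated as a protocol
 * violation and the channel is disconnected instead of letting the count
 * wrap.
 */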
5510
Johan Hedberg71fb4192013-12-10 10:52:48 +02005511static inline int l2cap_le_command_rej(struct l2cap_conn *conn,
5512 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5513 u8 *data)
5514{
5515 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
5516 struct l2cap_chan *chan;
5517
5518 if (cmd_len < sizeof(*rej))
5519 return -EPROTO;
5520
5521 mutex_lock(&conn->chan_lock);
5522
5523 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
5524 if (!chan)
5525 goto done;
5526
5527 l2cap_chan_lock(chan);
5528 l2cap_chan_del(chan, ECONNREFUSED);
5529 l2cap_chan_unlock(chan);
5530
5531done:
5532 mutex_unlock(&conn->chan_lock);
5533 return 0;
5534}
5535
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005536static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
Johan Hedberg203e6392013-05-15 10:07:15 +03005537 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
5538 u8 *data)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005539{
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005540 int err = 0;
5541
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005542 switch (cmd->code) {
5543 case L2CAP_COMMAND_REJ:
Johan Hedberg71fb4192013-12-10 10:52:48 +02005544 l2cap_le_command_rej(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005545 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005546
5547 case L2CAP_CONN_PARAM_UPDATE_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005548 err = l2cap_conn_param_update_req(conn, cmd, cmd_len, data);
5549 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005550
5551 case L2CAP_CONN_PARAM_UPDATE_RSP:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005552 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005553
Johan Hedbergf1496de2013-05-13 14:15:56 +03005554 case L2CAP_LE_CONN_RSP:
5555 l2cap_le_connect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005556 break;
Johan Hedbergf1496de2013-05-13 14:15:56 +03005557
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005558 case L2CAP_LE_CONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005559 err = l2cap_le_connect_req(conn, cmd, cmd_len, data);
5560 break;
Johan Hedberg27e2d4c2013-05-14 13:27:21 +03005561
Johan Hedbergfad5fc82013-12-05 09:45:01 +02005562 case L2CAP_LE_CREDITS:
5563 err = l2cap_le_credits(conn, cmd, cmd_len, data);
5564 break;
5565
Johan Hedberg3defe012013-05-15 10:16:06 +03005566 case L2CAP_DISCONN_REQ:
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005567 err = l2cap_disconnect_req(conn, cmd, cmd_len, data);
5568 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005569
5570 case L2CAP_DISCONN_RSP:
5571 l2cap_disconnect_rsp(conn, cmd, cmd_len, data);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005572 break;
Johan Hedberg3defe012013-05-15 10:16:06 +03005573
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005574 default:
5575 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005576 err = -EINVAL;
5577 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005578 }
Johan Hedbergb5ecba62013-12-02 12:21:29 +02005579
5580 return err;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005581}
5582
Johan Hedbergc5623552013-04-29 19:35:33 +03005583static inline void l2cap_le_sig_channel(struct l2cap_conn *conn,
5584 struct sk_buff *skb)
5585{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005586 struct hci_conn *hcon = conn->hcon;
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005587 struct l2cap_cmd_hdr *cmd;
5588 u16 len;
Johan Hedbergc5623552013-04-29 19:35:33 +03005589 int err;
5590
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005591 if (hcon->type != LE_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005592 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005593
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005594 if (skb->len < L2CAP_CMD_HDR_SIZE)
5595 goto drop;
Johan Hedbergc5623552013-04-29 19:35:33 +03005596
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005597 cmd = (void *) skb->data;
5598 skb_pull(skb, L2CAP_CMD_HDR_SIZE);
Johan Hedbergc5623552013-04-29 19:35:33 +03005599
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005600 len = le16_to_cpu(cmd->len);
Johan Hedbergc5623552013-04-29 19:35:33 +03005601
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005602 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd->code, len, cmd->ident);
Johan Hedbergc5623552013-04-29 19:35:33 +03005603
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005604 if (len != skb->len || !cmd->ident) {
5605 BT_DBG("corrupted command");
5606 goto drop;
5607 }
Johan Hedbergc5623552013-04-29 19:35:33 +03005608
Johan Hedberg203e6392013-05-15 10:07:15 +03005609 err = l2cap_le_sig_cmd(conn, cmd, len, skb->data);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005610 if (err) {
5611 struct l2cap_cmd_rej_unk rej;
Johan Hedbergc5623552013-04-29 19:35:33 +03005612
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005613		BT_ERR("LE signaling command processing failed (%d)", err);
Johan Hedbergc5623552013-04-29 19:35:33 +03005614
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005615 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Marcel Holtmann4f3e2192013-10-03 01:26:37 -07005616 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
5617 sizeof(rej), &rej);
Johan Hedbergc5623552013-04-29 19:35:33 +03005618 }
5619
Marcel Holtmann3b166292013-10-02 08:28:21 -07005620drop:
Johan Hedbergc5623552013-04-29 19:35:33 +03005621 kfree_skb(skb);
5622}
5623
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005624static inline void l2cap_sig_channel(struct l2cap_conn *conn,
Gustavo Padovan2d792812012-10-06 10:07:01 +01005625 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005626{
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005627 struct hci_conn *hcon = conn->hcon;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005628 u8 *data = skb->data;
5629 int len = skb->len;
5630 struct l2cap_cmd_hdr cmd;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005631 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005632
5633 l2cap_raw_recv(conn, skb);
5634
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005635 if (hcon->type != ACL_LINK)
Marcel Holtmann3b166292013-10-02 08:28:21 -07005636 goto drop;
Johan Hedberg69c4e4e2013-09-16 13:05:18 +03005637
Linus Torvalds1da177e2005-04-16 15:20:36 -07005638 while (len >= L2CAP_CMD_HDR_SIZE) {
Al Viro88219a02007-07-29 00:17:25 -07005639 u16 cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005640 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5641 data += L2CAP_CMD_HDR_SIZE;
5642 len -= L2CAP_CMD_HDR_SIZE;
5643
Al Viro88219a02007-07-29 00:17:25 -07005644 cmd_len = le16_to_cpu(cmd.len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005645
Gustavo Padovan2d792812012-10-06 10:07:01 +01005646 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len,
5647 cmd.ident);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005648
Al Viro88219a02007-07-29 00:17:25 -07005649 if (cmd_len > len || !cmd.ident) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005650 BT_DBG("corrupted command");
5651 break;
5652 }
5653
Johan Hedbergc5623552013-04-29 19:35:33 +03005654 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005655 if (err) {
Ilia Kolomisnkye2fd3182011-07-10 08:47:44 +03005656 struct l2cap_cmd_rej_unk rej;
Gustavo F. Padovan2c6d1a22011-03-23 14:38:32 -03005657
 5658			BT_ERR("Signaling command processing failed (%d)", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005659
Joe Perchesdcf4adb2014-03-12 10:52:35 -07005660 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
Gustavo Padovan2d792812012-10-06 10:07:01 +01005661 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ,
5662 sizeof(rej), &rej);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005663 }
5664
Al Viro88219a02007-07-29 00:17:25 -07005665 data += cmd_len;
5666 len -= cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005667 }
5668
Marcel Holtmann3b166292013-10-02 08:28:21 -07005669drop:
Linus Torvalds1da177e2005-04-16 15:20:36 -07005670 kfree_skb(skb);
5671}
5672
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005673static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005674{
5675 u16 our_fcs, rcv_fcs;
Andrei Emeltchenkoe4ca6d92011-10-11 13:37:52 +03005676 int hdr_size;
5677
5678 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
5679 hdr_size = L2CAP_EXT_HDR_SIZE;
5680 else
5681 hdr_size = L2CAP_ENH_HDR_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005682
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03005683 if (chan->fcs == L2CAP_FCS_CRC16) {
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005684 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005685 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5686 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5687
5688 if (our_fcs != rcv_fcs)
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005689 return -EBADMSG;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005690 }
5691 return 0;
5692}
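/* Reference note (assuming the standard L2CAP FCS definition): the FCS is
 * a CRC-16 with polynomial x^16 + x^15 + x^2 + 1 and initial value 0,
 * computed over the L2CAP header plus the payload. Here skb->data -
 * hdr_size points back at that header, and the two FCS octets are trimmed
 * from the tail before the payload is processed further.
 */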
5693
Mat Martineau6ea00482012-05-17 20:53:52 -07005694static void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005695{
Mat Martineaue31f7632012-05-17 20:53:41 -07005696 struct l2cap_ctrl control;
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005697
Mat Martineaue31f7632012-05-17 20:53:41 -07005698 BT_DBG("chan %p", chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005699
Mat Martineaue31f7632012-05-17 20:53:41 -07005700 memset(&control, 0, sizeof(control));
5701 control.sframe = 1;
5702 control.final = 1;
5703 control.reqseq = chan->buffer_seq;
5704 set_bit(CONN_SEND_FBIT, &chan->conn_state);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005705
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005706 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
Mat Martineaue31f7632012-05-17 20:53:41 -07005707 control.super = L2CAP_SUPER_RNR;
5708 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005709 }
5710
Mat Martineaue31f7632012-05-17 20:53:41 -07005711 if (test_and_clear_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
5712 chan->unacked_frames > 0)
5713 __set_retrans_timer(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005714
Mat Martineaue31f7632012-05-17 20:53:41 -07005715 /* Send pending iframes */
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03005716 l2cap_ertm_send(chan);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005717
Gustavo F. Padovane2ab4352011-06-10 21:28:49 -03005718 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
Mat Martineaue31f7632012-05-17 20:53:41 -07005719 test_bit(CONN_SEND_FBIT, &chan->conn_state)) {
5720 /* F-bit wasn't sent in an s-frame or i-frame yet, so
5721 * send it now.
5722 */
5723 control.super = L2CAP_SUPER_RR;
5724 l2cap_send_sframe(chan, &control);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005725 }
5726}
5727
Gustavo Padovan2d792812012-10-06 10:07:01 +01005728static void append_skb_frag(struct sk_buff *skb, struct sk_buff *new_frag,
5729 struct sk_buff **last_frag)
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005730{
Mat Martineau84084a32011-07-22 14:54:00 -07005731 /* skb->len reflects data in skb as well as all fragments
5732 * skb->data_len reflects only data in fragments
5733 */
5734 if (!skb_has_frag_list(skb))
5735 skb_shinfo(skb)->frag_list = new_frag;
5736
5737 new_frag->next = NULL;
5738
5739 (*last_frag)->next = new_frag;
5740 *last_frag = new_frag;
5741
5742 skb->len += new_frag->len;
5743 skb->data_len += new_frag->len;
5744 skb->truesize += new_frag->truesize;
5745}
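/* Worked example (illustrative only): if skb has len = 100 with no
 * fragments and new_frag has len = 40, then after this call skb->len is
 * 140 and skb->data_len is 40, i.e. the linear part is still 100 bytes
 * and the extra 40 bytes live on the frag_list. truesize grows by the
 * fragment's own truesize so socket memory accounting stays correct.
 */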
5746
Mat Martineau4b51dae92012-05-17 20:53:37 -07005747static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb,
5748 struct l2cap_ctrl *control)
Mat Martineau84084a32011-07-22 14:54:00 -07005749{
5750 int err = -EINVAL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005751
Mat Martineau4b51dae92012-05-17 20:53:37 -07005752 switch (control->sar) {
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005753 case L2CAP_SAR_UNSEGMENTED:
Mat Martineau84084a32011-07-22 14:54:00 -07005754 if (chan->sdu)
5755 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005756
Gustavo Padovan80b98022012-05-27 22:27:51 -03005757 err = chan->ops->recv(chan, skb);
Mat Martineau84084a32011-07-22 14:54:00 -07005758 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005759
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005760 case L2CAP_SAR_START:
Mat Martineau84084a32011-07-22 14:54:00 -07005761 if (chan->sdu)
5762 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005763
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005764 chan->sdu_len = get_unaligned_le16(skb->data);
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03005765 skb_pull(skb, L2CAP_SDULEN_SIZE);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005766
Mat Martineau84084a32011-07-22 14:54:00 -07005767 if (chan->sdu_len > chan->imtu) {
5768 err = -EMSGSIZE;
5769 break;
5770 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005771
Mat Martineau84084a32011-07-22 14:54:00 -07005772 if (skb->len >= chan->sdu_len)
5773 break;
5774
5775 chan->sdu = skb;
5776 chan->sdu_last_frag = skb;
5777
5778 skb = NULL;
5779 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005780 break;
5781
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005782 case L2CAP_SAR_CONTINUE:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005783 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005784 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005785
Mat Martineau84084a32011-07-22 14:54:00 -07005786 append_skb_frag(chan->sdu, skb,
5787 &chan->sdu_last_frag);
5788 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005789
Mat Martineau84084a32011-07-22 14:54:00 -07005790 if (chan->sdu->len >= chan->sdu_len)
5791 break;
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005792
Mat Martineau84084a32011-07-22 14:54:00 -07005793 err = 0;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005794 break;
5795
Andrei Emeltchenko7e0ef6e2011-10-11 13:37:45 +03005796 case L2CAP_SAR_END:
Gustavo F. Padovan6f61fd472011-03-25 20:09:37 -03005797 if (!chan->sdu)
Mat Martineau84084a32011-07-22 14:54:00 -07005798 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005799
Mat Martineau84084a32011-07-22 14:54:00 -07005800 append_skb_frag(chan->sdu, skb,
5801 &chan->sdu_last_frag);
5802 skb = NULL;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005803
Mat Martineau84084a32011-07-22 14:54:00 -07005804 if (chan->sdu->len != chan->sdu_len)
5805 break;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005806
Gustavo Padovan80b98022012-05-27 22:27:51 -03005807 err = chan->ops->recv(chan, chan->sdu);
Gustavo F. Padovan4178ba42010-05-01 16:15:45 -03005808
Mat Martineau84084a32011-07-22 14:54:00 -07005809 if (!err) {
5810 /* Reassembly complete */
5811 chan->sdu = NULL;
5812 chan->sdu_last_frag = NULL;
5813 chan->sdu_len = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005814 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005815 break;
5816 }
5817
Mat Martineau84084a32011-07-22 14:54:00 -07005818 if (err) {
5819 kfree_skb(skb);
5820 kfree_skb(chan->sdu);
5821 chan->sdu = NULL;
5822 chan->sdu_last_frag = NULL;
5823 chan->sdu_len = 0;
5824 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005825
Mat Martineau84084a32011-07-22 14:54:00 -07005826 return err;
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005827}
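/* Worked example (illustrative only): a 100 byte SDU arriving over a
 * channel with a smaller MPS is delivered as a SAR_START frame (carrying
 * the 2-byte SDU length of 100 plus the first chunk), zero or more
 * SAR_CONTINUE frames and a final SAR_END frame. The chunks are chained
 * with append_skb_frag() and the SDU is handed to chan->ops->recv() once
 * the accumulated length matches chan->sdu_len; a mismatch or an SDU
 * larger than the MTU frees the partial reassembly instead.
 */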
5828
Mat Martineau32b32732012-10-23 15:24:11 -07005829static int l2cap_resegment(struct l2cap_chan *chan)
5830{
5831 /* Placeholder */
5832 return 0;
5833}
5834
Mat Martineaue3281402011-07-07 09:39:02 -07005835void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
Gustavo F. Padovan712132eb2010-06-21 19:39:50 -03005836{
Mat Martineau61aa4f52012-05-17 20:53:40 -07005837 u8 event;
5838
5839 if (chan->mode != L2CAP_MODE_ERTM)
5840 return;
5841
5842 event = busy ? L2CAP_EV_LOCAL_BUSY_DETECTED : L2CAP_EV_LOCAL_BUSY_CLEAR;
Andrei Emeltchenko401bb1f2012-05-21 15:47:46 +03005843 l2cap_tx(chan, NULL, NULL, event);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005844}
5845
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005846static int l2cap_rx_queued_iframes(struct l2cap_chan *chan)
5847{
Mat Martineau63838722012-05-17 20:53:45 -07005848 int err = 0;
5849 /* Pass sequential frames to l2cap_reassemble_sdu()
5850 * until a gap is encountered.
5851 */
5852
5853 BT_DBG("chan %p", chan);
5854
5855 while (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
5856 struct sk_buff *skb;
5857 BT_DBG("Searching for skb with txseq %d (queue len %d)",
5858 chan->buffer_seq, skb_queue_len(&chan->srej_q));
5859
5860 skb = l2cap_ertm_seq_in_queue(&chan->srej_q, chan->buffer_seq);
5861
5862 if (!skb)
5863 break;
5864
5865 skb_unlink(skb, &chan->srej_q);
5866 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
5867 err = l2cap_reassemble_sdu(chan, skb, &bt_cb(skb)->control);
5868 if (err)
5869 break;
5870 }
5871
5872 if (skb_queue_empty(&chan->srej_q)) {
5873 chan->rx_state = L2CAP_RX_STATE_RECV;
5874 l2cap_send_ack(chan);
5875 }
5876
5877 return err;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005878}
5879
5880static void l2cap_handle_srej(struct l2cap_chan *chan,
5881 struct l2cap_ctrl *control)
5882{
Mat Martineauf80842a2012-05-17 20:53:46 -07005883 struct sk_buff *skb;
5884
5885 BT_DBG("chan %p, control %p", chan, control);
5886
5887 if (control->reqseq == chan->next_tx_seq) {
5888 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005889 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07005890 return;
5891 }
5892
5893 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5894
5895 if (skb == NULL) {
5896 BT_DBG("Seq %d not available for retransmission",
5897 control->reqseq);
5898 return;
5899 }
5900
5901 if (chan->max_tx != 0 && bt_cb(skb)->control.retries >= chan->max_tx) {
5902 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005903 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineauf80842a2012-05-17 20:53:46 -07005904 return;
5905 }
5906
5907 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5908
5909 if (control->poll) {
5910 l2cap_pass_to_tx(chan, control);
5911
5912 set_bit(CONN_SEND_FBIT, &chan->conn_state);
5913 l2cap_retransmit(chan, control);
5914 l2cap_ertm_send(chan);
5915
5916 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5917 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5918 chan->srej_save_reqseq = control->reqseq;
5919 }
5920 } else {
5921 l2cap_pass_to_tx_fbit(chan, control);
5922
5923 if (control->final) {
5924 if (chan->srej_save_reqseq != control->reqseq ||
5925 !test_and_clear_bit(CONN_SREJ_ACT,
5926 &chan->conn_state))
5927 l2cap_retransmit(chan, control);
5928 } else {
5929 l2cap_retransmit(chan, control);
5930 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F) {
5931 set_bit(CONN_SREJ_ACT, &chan->conn_state);
5932 chan->srej_save_reqseq = control->reqseq;
5933 }
5934 }
5935 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005936}
5937
5938static void l2cap_handle_rej(struct l2cap_chan *chan,
5939 struct l2cap_ctrl *control)
5940{
Mat Martineaufcd289d2012-05-17 20:53:47 -07005941 struct sk_buff *skb;
5942
5943 BT_DBG("chan %p, control %p", chan, control);
5944
5945 if (control->reqseq == chan->next_tx_seq) {
5946 BT_DBG("Invalid reqseq %d, disconnecting", control->reqseq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005947 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07005948 return;
5949 }
5950
5951 skb = l2cap_ertm_seq_in_queue(&chan->tx_q, control->reqseq);
5952
5953 if (chan->max_tx && skb &&
5954 bt_cb(skb)->control.retries >= chan->max_tx) {
5955 BT_DBG("Retry limit exceeded (%d)", chan->max_tx);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02005956 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaufcd289d2012-05-17 20:53:47 -07005957 return;
5958 }
5959
5960 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
5961
5962 l2cap_pass_to_tx(chan, control);
5963
5964 if (control->final) {
5965 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
5966 l2cap_retransmit_all(chan, control);
5967 } else {
5968 l2cap_retransmit_all(chan, control);
5969 l2cap_ertm_send(chan);
5970 if (chan->tx_state == L2CAP_TX_STATE_WAIT_F)
5971 set_bit(CONN_REJ_ACT, &chan->conn_state);
5972 }
Mat Martineaud2a7ac52012-05-17 20:53:42 -07005973}
5974
Mat Martineau4b51dae92012-05-17 20:53:37 -07005975static u8 l2cap_classify_txseq(struct l2cap_chan *chan, u16 txseq)
5976{
5977 BT_DBG("chan %p, txseq %d", chan, txseq);
5978
5979 BT_DBG("last_acked_seq %d, expected_tx_seq %d", chan->last_acked_seq,
5980 chan->expected_tx_seq);
5981
5982 if (chan->rx_state == L2CAP_RX_STATE_SREJ_SENT) {
5983 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
Gustavo Padovan2d792812012-10-06 10:07:01 +01005984 chan->tx_win) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07005985 /* See notes below regarding "double poll" and
5986 * invalid packets.
5987 */
5988 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
5989 BT_DBG("Invalid/Ignore - after SREJ");
5990 return L2CAP_TXSEQ_INVALID_IGNORE;
5991 } else {
5992 BT_DBG("Invalid - in window after SREJ sent");
5993 return L2CAP_TXSEQ_INVALID;
5994 }
5995 }
5996
5997 if (chan->srej_list.head == txseq) {
5998 BT_DBG("Expected SREJ");
5999 return L2CAP_TXSEQ_EXPECTED_SREJ;
6000 }
6001
6002 if (l2cap_ertm_seq_in_queue(&chan->srej_q, txseq)) {
6003 BT_DBG("Duplicate SREJ - txseq already stored");
6004 return L2CAP_TXSEQ_DUPLICATE_SREJ;
6005 }
6006
6007 if (l2cap_seq_list_contains(&chan->srej_list, txseq)) {
6008 BT_DBG("Unexpected SREJ - not requested");
6009 return L2CAP_TXSEQ_UNEXPECTED_SREJ;
6010 }
6011 }
6012
6013 if (chan->expected_tx_seq == txseq) {
6014 if (__seq_offset(chan, txseq, chan->last_acked_seq) >=
6015 chan->tx_win) {
6016 BT_DBG("Invalid - txseq outside tx window");
6017 return L2CAP_TXSEQ_INVALID;
6018 } else {
6019 BT_DBG("Expected");
6020 return L2CAP_TXSEQ_EXPECTED;
6021 }
6022 }
6023
6024 if (__seq_offset(chan, txseq, chan->last_acked_seq) <
Gustavo Padovan2d792812012-10-06 10:07:01 +01006025 __seq_offset(chan, chan->expected_tx_seq, chan->last_acked_seq)) {
Mat Martineau4b51dae92012-05-17 20:53:37 -07006026 BT_DBG("Duplicate - expected_tx_seq later than txseq");
6027 return L2CAP_TXSEQ_DUPLICATE;
6028 }
6029
6030 if (__seq_offset(chan, txseq, chan->last_acked_seq) >= chan->tx_win) {
6031 /* A source of invalid packets is a "double poll" condition,
6032 * where delays cause us to send multiple poll packets. If
6033 * the remote stack receives and processes both polls,
6034 * sequence numbers can wrap around in such a way that a
6035 * resent frame has a sequence number that looks like new data
6036 * with a sequence gap. This would trigger an erroneous SREJ
6037 * request.
6038 *
6039 * Fortunately, this is impossible with a tx window that's
6040 * less than half of the maximum sequence number, which allows
6041 * invalid frames to be safely ignored.
6042 *
6043 * With tx window sizes greater than half of the tx window
6044 * maximum, the frame is invalid and cannot be ignored. This
6045 * causes a disconnect.
6046 */
6047
6048 if (chan->tx_win <= ((chan->tx_win_max + 1) >> 1)) {
6049 BT_DBG("Invalid/Ignore - txseq outside tx window");
6050 return L2CAP_TXSEQ_INVALID_IGNORE;
6051 } else {
6052 BT_DBG("Invalid - txseq outside tx window");
6053 return L2CAP_TXSEQ_INVALID;
6054 }
6055 } else {
6056 BT_DBG("Unexpected - txseq indicates missing frames");
6057 return L2CAP_TXSEQ_UNEXPECTED;
6058 }
6059}
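/* Worked example (illustrative only): with the enhanced control field the
 * sequence space is 64 (tx_win_max = 63). If last_acked_seq = 60,
 * expected_tx_seq = 62 and tx_win = 10, then txseqs 60-63 and 0-5 are
 * inside the window: txseq 62 is EXPECTED, txseq 61 is a DUPLICATE,
 * txseq 3 is UNEXPECTED (it implies missing frames), and any txseq whose
 * offset from last_acked_seq is 10 or more is INVALID or, for small
 * windows like this one, INVALID_IGNORE as explained in the "double poll"
 * comment above.
 */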
6060
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006061static int l2cap_rx_state_recv(struct l2cap_chan *chan,
6062 struct l2cap_ctrl *control,
6063 struct sk_buff *skb, u8 event)
6064{
6065 int err = 0;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006066 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006067
6068 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6069 event);
6070
6071 switch (event) {
6072 case L2CAP_EV_RECV_IFRAME:
6073 switch (l2cap_classify_txseq(chan, control->txseq)) {
6074 case L2CAP_TXSEQ_EXPECTED:
6075 l2cap_pass_to_tx(chan, control);
6076
6077 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6078 BT_DBG("Busy, discarding expected seq %d",
6079 control->txseq);
6080 break;
6081 }
6082
6083 chan->expected_tx_seq = __next_seq(chan,
6084 control->txseq);
6085
6086 chan->buffer_seq = chan->expected_tx_seq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006087 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006088
6089 err = l2cap_reassemble_sdu(chan, skb, control);
6090 if (err)
6091 break;
6092
6093 if (control->final) {
6094 if (!test_and_clear_bit(CONN_REJ_ACT,
6095 &chan->conn_state)) {
6096 control->final = 0;
6097 l2cap_retransmit_all(chan, control);
6098 l2cap_ertm_send(chan);
6099 }
6100 }
6101
6102 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
6103 l2cap_send_ack(chan);
6104 break;
6105 case L2CAP_TXSEQ_UNEXPECTED:
6106 l2cap_pass_to_tx(chan, control);
6107
6108 /* Can't issue SREJ frames in the local busy state.
 6109			 * Drop this frame; it will be seen as missing
6110 * when local busy is exited.
6111 */
6112 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
6113 BT_DBG("Busy, discarding unexpected seq %d",
6114 control->txseq);
6115 break;
6116 }
6117
6118 /* There was a gap in the sequence, so an SREJ
6119 * must be sent for each missing frame. The
6120 * current frame is stored for later use.
6121 */
6122 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006123 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006124 BT_DBG("Queued %p (queue len %d)", skb,
6125 skb_queue_len(&chan->srej_q));
6126
6127 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
6128 l2cap_seq_list_clear(&chan->srej_list);
6129 l2cap_send_srej(chan, control->txseq);
6130
6131 chan->rx_state = L2CAP_RX_STATE_SREJ_SENT;
6132 break;
6133 case L2CAP_TXSEQ_DUPLICATE:
6134 l2cap_pass_to_tx(chan, control);
6135 break;
6136 case L2CAP_TXSEQ_INVALID_IGNORE:
6137 break;
6138 case L2CAP_TXSEQ_INVALID:
6139 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006140 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006141 break;
6142 }
6143 break;
6144 case L2CAP_EV_RECV_RR:
6145 l2cap_pass_to_tx(chan, control);
6146 if (control->final) {
6147 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6148
Mat Martineaue6a3ee62012-10-23 15:24:22 -07006149 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state) &&
6150 !__chan_is_moving(chan)) {
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006151 control->final = 0;
6152 l2cap_retransmit_all(chan, control);
6153 }
6154
6155 l2cap_ertm_send(chan);
6156 } else if (control->poll) {
6157 l2cap_send_i_or_rr_or_rnr(chan);
6158 } else {
6159 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6160 &chan->conn_state) &&
6161 chan->unacked_frames)
6162 __set_retrans_timer(chan);
6163
6164 l2cap_ertm_send(chan);
6165 }
6166 break;
6167 case L2CAP_EV_RECV_RNR:
6168 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6169 l2cap_pass_to_tx(chan, control);
6170 if (control && control->poll) {
6171 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6172 l2cap_send_rr_or_rnr(chan, 0);
6173 }
6174 __clear_retrans_timer(chan);
6175 l2cap_seq_list_clear(&chan->retrans_list);
6176 break;
6177 case L2CAP_EV_RECV_REJ:
6178 l2cap_handle_rej(chan, control);
6179 break;
6180 case L2CAP_EV_RECV_SREJ:
6181 l2cap_handle_srej(chan, control);
6182 break;
6183 default:
6184 break;
6185 }
6186
6187 if (skb && !skb_in_use) {
6188 BT_DBG("Freeing %p", skb);
6189 kfree_skb(skb);
6190 }
6191
6192 return err;
6193}
6194
6195static int l2cap_rx_state_srej_sent(struct l2cap_chan *chan,
6196 struct l2cap_ctrl *control,
6197 struct sk_buff *skb, u8 event)
6198{
6199 int err = 0;
6200 u16 txseq = control->txseq;
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006201 bool skb_in_use = false;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006202
6203 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6204 event);
6205
6206 switch (event) {
6207 case L2CAP_EV_RECV_IFRAME:
6208 switch (l2cap_classify_txseq(chan, txseq)) {
6209 case L2CAP_TXSEQ_EXPECTED:
6210 /* Keep frame for reassembly later */
6211 l2cap_pass_to_tx(chan, control);
6212 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006213 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006214 BT_DBG("Queued %p (queue len %d)", skb,
6215 skb_queue_len(&chan->srej_q));
6216
6217 chan->expected_tx_seq = __next_seq(chan, txseq);
6218 break;
6219 case L2CAP_TXSEQ_EXPECTED_SREJ:
6220 l2cap_seq_list_pop(&chan->srej_list);
6221
6222 l2cap_pass_to_tx(chan, control);
6223 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006224 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006225 BT_DBG("Queued %p (queue len %d)", skb,
6226 skb_queue_len(&chan->srej_q));
6227
6228 err = l2cap_rx_queued_iframes(chan);
6229 if (err)
6230 break;
6231
6232 break;
6233 case L2CAP_TXSEQ_UNEXPECTED:
6234 /* Got a frame that can't be reassembled yet.
6235 * Save it for later, and send SREJs to cover
6236 * the missing frames.
6237 */
6238 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006239 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006240 BT_DBG("Queued %p (queue len %d)", skb,
6241 skb_queue_len(&chan->srej_q));
6242
6243 l2cap_pass_to_tx(chan, control);
6244 l2cap_send_srej(chan, control->txseq);
6245 break;
6246 case L2CAP_TXSEQ_UNEXPECTED_SREJ:
6247 /* This frame was requested with an SREJ, but
6248 * some expected retransmitted frames are
6249 * missing. Request retransmission of missing
6250 * SREJ'd frames.
6251 */
6252 skb_queue_tail(&chan->srej_q, skb);
Peter Senna Tschudin941247f2013-09-22 20:44:10 +02006253 skb_in_use = true;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006254 BT_DBG("Queued %p (queue len %d)", skb,
6255 skb_queue_len(&chan->srej_q));
6256
6257 l2cap_pass_to_tx(chan, control);
6258 l2cap_send_srej_list(chan, control->txseq);
6259 break;
6260 case L2CAP_TXSEQ_DUPLICATE_SREJ:
6261 /* We've already queued this frame. Drop this copy. */
6262 l2cap_pass_to_tx(chan, control);
6263 break;
6264 case L2CAP_TXSEQ_DUPLICATE:
6265 /* Expecting a later sequence number, so this frame
6266 * was already received. Ignore it completely.
6267 */
6268 break;
6269 case L2CAP_TXSEQ_INVALID_IGNORE:
6270 break;
6271 case L2CAP_TXSEQ_INVALID:
6272 default:
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006273 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006274 break;
6275 }
6276 break;
6277 case L2CAP_EV_RECV_RR:
6278 l2cap_pass_to_tx(chan, control);
6279 if (control->final) {
6280 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6281
6282 if (!test_and_clear_bit(CONN_REJ_ACT,
6283 &chan->conn_state)) {
6284 control->final = 0;
6285 l2cap_retransmit_all(chan, control);
6286 }
6287
6288 l2cap_ertm_send(chan);
6289 } else if (control->poll) {
6290 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6291 &chan->conn_state) &&
6292 chan->unacked_frames) {
6293 __set_retrans_timer(chan);
6294 }
6295
6296 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6297 l2cap_send_srej_tail(chan);
6298 } else {
6299 if (test_and_clear_bit(CONN_REMOTE_BUSY,
6300 &chan->conn_state) &&
6301 chan->unacked_frames)
6302 __set_retrans_timer(chan);
6303
6304 l2cap_send_ack(chan);
6305 }
6306 break;
6307 case L2CAP_EV_RECV_RNR:
6308 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6309 l2cap_pass_to_tx(chan, control);
6310 if (control->poll) {
6311 l2cap_send_srej_tail(chan);
6312 } else {
6313 struct l2cap_ctrl rr_control;
6314 memset(&rr_control, 0, sizeof(rr_control));
6315 rr_control.sframe = 1;
6316 rr_control.super = L2CAP_SUPER_RR;
6317 rr_control.reqseq = chan->buffer_seq;
6318 l2cap_send_sframe(chan, &rr_control);
6319 }
6320
6321 break;
6322 case L2CAP_EV_RECV_REJ:
6323 l2cap_handle_rej(chan, control);
6324 break;
6325 case L2CAP_EV_RECV_SREJ:
6326 l2cap_handle_srej(chan, control);
6327 break;
6328 }
6329
6330 if (skb && !skb_in_use) {
6331 BT_DBG("Freeing %p", skb);
6332 kfree_skb(skb);
6333 }
6334
6335 return err;
6336}
6337
Mat Martineau32b32732012-10-23 15:24:11 -07006338static int l2cap_finish_move(struct l2cap_chan *chan)
6339{
6340 BT_DBG("chan %p", chan);
6341
6342 chan->rx_state = L2CAP_RX_STATE_RECV;
6343
6344 if (chan->hs_hcon)
6345 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6346 else
6347 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6348
6349 return l2cap_resegment(chan);
6350}
6351
6352static int l2cap_rx_state_wait_p(struct l2cap_chan *chan,
6353 struct l2cap_ctrl *control,
6354 struct sk_buff *skb, u8 event)
6355{
6356 int err;
6357
6358 BT_DBG("chan %p, control %p, skb %p, event %d", chan, control, skb,
6359 event);
6360
6361 if (!control->poll)
6362 return -EPROTO;
6363
6364 l2cap_process_reqseq(chan, control->reqseq);
6365
6366 if (!skb_queue_empty(&chan->tx_q))
6367 chan->tx_send_head = skb_peek(&chan->tx_q);
6368 else
6369 chan->tx_send_head = NULL;
6370
6371 /* Rewind next_tx_seq to the point expected
6372 * by the receiver.
6373 */
6374 chan->next_tx_seq = control->reqseq;
6375 chan->unacked_frames = 0;
6376
6377 err = l2cap_finish_move(chan);
6378 if (err)
6379 return err;
6380
6381 set_bit(CONN_SEND_FBIT, &chan->conn_state);
6382 l2cap_send_i_or_rr_or_rnr(chan);
6383
6384 if (event == L2CAP_EV_RECV_IFRAME)
6385 return -EPROTO;
6386
6387 return l2cap_rx_state_recv(chan, control, NULL, event);
6388}
6389
6390static int l2cap_rx_state_wait_f(struct l2cap_chan *chan,
6391 struct l2cap_ctrl *control,
6392 struct sk_buff *skb, u8 event)
6393{
6394 int err;
6395
6396 if (!control->final)
6397 return -EPROTO;
6398
6399 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
6400
6401 chan->rx_state = L2CAP_RX_STATE_RECV;
6402 l2cap_process_reqseq(chan, control->reqseq);
6403
6404 if (!skb_queue_empty(&chan->tx_q))
6405 chan->tx_send_head = skb_peek(&chan->tx_q);
6406 else
6407 chan->tx_send_head = NULL;
6408
6409 /* Rewind next_tx_seq to the point expected
6410 * by the receiver.
6411 */
6412 chan->next_tx_seq = control->reqseq;
6413 chan->unacked_frames = 0;
6414
6415 if (chan->hs_hcon)
6416 chan->conn->mtu = chan->hs_hcon->hdev->block_mtu;
6417 else
6418 chan->conn->mtu = chan->conn->hcon->hdev->acl_mtu;
6419
6420 err = l2cap_resegment(chan);
6421
6422 if (!err)
6423 err = l2cap_rx_state_recv(chan, control, skb, event);
6424
6425 return err;
6426}
6427
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006428static bool __valid_reqseq(struct l2cap_chan *chan, u16 reqseq)
6429{
6430 /* Make sure reqseq is for a packet that has been sent but not acked */
6431 u16 unacked;
6432
6433 unacked = __seq_offset(chan, chan->next_tx_seq, chan->expected_ack_seq);
6434 return __seq_offset(chan, chan->next_tx_seq, reqseq) <= unacked;
6435}
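/* Worked example (illustrative only): if next_tx_seq = 10 and
 * expected_ack_seq = 6 then unacked = 4, so any reqseq from 6 to 10 is
 * accepted, while reqseq = 5 would acknowledge a frame that was never
 * sent and causes l2cap_rx() to disconnect the channel.
 */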
6436
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006437static int l2cap_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6438 struct sk_buff *skb, u8 event)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006439{
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006440 int err = 0;
6441
6442 BT_DBG("chan %p, control %p, skb %p, event %d, state %d", chan,
6443 control, skb, event, chan->rx_state);
6444
6445 if (__valid_reqseq(chan, control->reqseq)) {
6446 switch (chan->rx_state) {
6447 case L2CAP_RX_STATE_RECV:
6448 err = l2cap_rx_state_recv(chan, control, skb, event);
6449 break;
6450 case L2CAP_RX_STATE_SREJ_SENT:
6451 err = l2cap_rx_state_srej_sent(chan, control, skb,
6452 event);
6453 break;
Mat Martineau32b32732012-10-23 15:24:11 -07006454 case L2CAP_RX_STATE_WAIT_P:
6455 err = l2cap_rx_state_wait_p(chan, control, skb, event);
6456 break;
6457 case L2CAP_RX_STATE_WAIT_F:
6458 err = l2cap_rx_state_wait_f(chan, control, skb, event);
6459 break;
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006460 default:
6461 /* shut it down */
6462 break;
6463 }
6464 } else {
6465		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d)",
6466 control->reqseq, chan->next_tx_seq,
6467 chan->expected_ack_seq);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006468 l2cap_send_disconn_req(chan, ECONNRESET);
Mat Martineaud2a7ac52012-05-17 20:53:42 -07006469 }
6470
6471 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006472}
6473
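/* As a hedged illustration of how a received supervisory frame is fed
 * through the dispatcher above (using the same l2cap_ctrl fields that
 * appear in this file; the scenario itself is made up):
 *
 *	struct l2cap_ctrl rr = {
 *		.sframe = 1,
 *		.super  = L2CAP_SUPER_RR,
 *		.reqseq = 3,
 *	};
 *
 *	l2cap_rx(chan, &rr, NULL, L2CAP_EV_RECV_RR);
 *
 * With the channel in L2CAP_RX_STATE_RECV and txseq 0..2 in flight, this
 * passes the reqseq window check and acknowledges those frames via
 * l2cap_rx_state_recv().
 */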
6474static int l2cap_stream_rx(struct l2cap_chan *chan, struct l2cap_ctrl *control,
6475 struct sk_buff *skb)
6476{
Mat Martineau4b51dae92012-05-17 20:53:37 -07006477 int err = 0;
6478
6479 BT_DBG("chan %p, control %p, skb %p, state %d", chan, control, skb,
6480 chan->rx_state);
6481
6482 if (l2cap_classify_txseq(chan, control->txseq) ==
6483 L2CAP_TXSEQ_EXPECTED) {
6484 l2cap_pass_to_tx(chan, control);
6485
6486 BT_DBG("buffer_seq %d->%d", chan->buffer_seq,
6487 __next_seq(chan, chan->buffer_seq));
6488
6489 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
6490
6491 l2cap_reassemble_sdu(chan, skb, control);
6492 } else {
6493 if (chan->sdu) {
6494 kfree_skb(chan->sdu);
6495 chan->sdu = NULL;
6496 }
6497 chan->sdu_last_frag = NULL;
6498 chan->sdu_len = 0;
6499
6500 if (skb) {
6501 BT_DBG("Freeing %p", skb);
6502 kfree_skb(skb);
6503 }
6504 }
6505
6506 chan->last_acked_seq = control->txseq;
6507 chan->expected_tx_seq = __next_seq(chan, control->txseq);
6508
6509 return err;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006510}
6511
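/* A short worked example of the streaming-mode path above: if
 * expected_tx_seq is 7 and an I-frame with txseq 9 arrives, it is not
 * classified as L2CAP_TXSEQ_EXPECTED, so any partially reassembled SDU
 * is thrown away along with the frame (streaming mode never
 * retransmits), and expected_tx_seq is resynchronized to 10 for the
 * next incoming frame.
 */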
6512static int l2cap_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6513{
6514 struct l2cap_ctrl *control = &bt_cb(skb)->control;
6515 u16 len;
6516 u8 event;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006517
Mat Martineaub76bbd62012-04-11 10:48:43 -07006518 __unpack_control(chan, skb);
6519
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006520 len = skb->len;
6521
6522 /*
6523 * We can just drop the corrupted I-frame here.
6524	 * The receiver will miss it and start the proper recovery
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006525	 * procedure, asking for a retransmission.
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006526 */
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006527 if (l2cap_check_fcs(chan, skb))
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006528 goto drop;
6529
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006530 if (!control->sframe && control->sar == L2CAP_SAR_START)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006531 len -= L2CAP_SDULEN_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006532
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006533 if (chan->fcs == L2CAP_FCS_CRC16)
Andrei Emeltchenko03a51212011-10-17 12:19:58 +03006534 len -= L2CAP_FCS_SIZE;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006535
Gustavo F. Padovan47d1ec62011-04-13 15:57:03 -03006536 if (len > chan->mps) {
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006537 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006538 goto drop;
6539 }
6540
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006541 if (!control->sframe) {
6542 int err;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006543
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006544 BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
6545 control->sar, control->reqseq, control->final,
6546 control->txseq);
Andrei Emeltchenko836be932011-10-17 12:19:57 +03006547
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006548 /* Validate F-bit - F=0 always valid, F=1 only
6549 * valid in TX WAIT_F
6550 */
6551 if (control->final && chan->tx_state != L2CAP_TX_STATE_WAIT_F)
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006552 goto drop;
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006553
6554 if (chan->mode != L2CAP_MODE_STREAMING) {
6555 event = L2CAP_EV_RECV_IFRAME;
6556 err = l2cap_rx(chan, control, skb, event);
6557 } else {
6558 err = l2cap_stream_rx(chan, control, skb);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006559 }
6560
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006561 if (err)
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006562 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006563 } else {
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006564 const u8 rx_func_to_event[4] = {
6565 L2CAP_EV_RECV_RR, L2CAP_EV_RECV_REJ,
6566 L2CAP_EV_RECV_RNR, L2CAP_EV_RECV_SREJ
6567 };
6568
6569 /* Only I-frames are expected in streaming mode */
6570 if (chan->mode == L2CAP_MODE_STREAMING)
6571 goto drop;
6572
6573 BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
6574 control->reqseq, control->final, control->poll,
6575 control->super);
6576
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006577 if (len != 0) {
Andrei Emeltchenko1bb166e2012-11-20 17:16:21 +02006578 BT_ERR("Trailing bytes: %d in sframe", len);
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006579 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006580 goto drop;
6581 }
6582
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006583 /* Validate F and P bits */
6584 if (control->final && (control->poll ||
6585 chan->tx_state != L2CAP_TX_STATE_WAIT_F))
6586 goto drop;
6587
6588 event = rx_func_to_event[control->super];
6589 if (l2cap_rx(chan, control, skb, event))
Andrei Emeltchenko5e4e3972012-11-28 17:59:39 +02006590 l2cap_send_disconn_req(chan, ECONNRESET);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006591 }
6592
6593 return 0;
6594
6595drop:
6596 kfree_skb(skb);
6597 return 0;
6598}
6599
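/* Length accounting in l2cap_data_rcv() as a worked example: for a
 * start-of-SDU I-frame (sar == L2CAP_SAR_START) on a channel using a
 * CRC16 FCS, an skb with 104 bytes left after the basic header and
 * control field gives len = 104 - L2CAP_SDULEN_SIZE (2) -
 * L2CAP_FCS_SIZE (2) = 100 bytes of SDU data, which must not exceed
 * chan->mps or the channel is torn down with ECONNRESET.
 */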
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006600static void l2cap_chan_le_send_credits(struct l2cap_chan *chan)
6601{
6602 struct l2cap_conn *conn = chan->conn;
6603 struct l2cap_le_credits pkt;
6604 u16 return_credits;
6605
6606 /* We return more credits to the sender only after the amount of
6607 * credits falls below half of the initial amount.
6608 */
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006609 if (chan->rx_credits >= (le_max_credits + 1) / 2)
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006610 return;
6611
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02006612 return_credits = le_max_credits - chan->rx_credits;
Johan Hedbergb1c325c2013-12-05 09:43:34 +02006613
6614 BT_DBG("chan %p returning %u credits to sender", chan, return_credits);
6615
6616 chan->rx_credits += return_credits;
6617
6618 pkt.cid = cpu_to_le16(chan->scid);
6619 pkt.credits = cpu_to_le16(return_credits);
6620
6621 chan->ident = l2cap_get_ident(conn);
6622
6623 l2cap_send_cmd(conn, chan->ident, L2CAP_LE_CREDITS, sizeof(pkt), &pkt);
6624}
6625
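/* A worked example of the credit refill rule above, assuming
 * le_max_credits is left at its default of 10 (the module parameter may
 * be tuned differently): no L2CAP_LE_CREDITS packet is sent while
 * rx_credits is still >= (10 + 1) / 2 = 5.  Once rx_credits drops to 4,
 * return_credits = 10 - 4 = 6 and a single credits packet tops the
 * remote side back up to the full 10.
 */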
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006626static int l2cap_le_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
6627{
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006628 int err;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006629
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006630 if (!chan->rx_credits) {
6631 BT_ERR("No credits to receive LE L2CAP data");
Johan Hedbergdfd97742014-01-27 15:11:34 -08006632 l2cap_send_disconn_req(chan, ECONNRESET);
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006633 return -ENOBUFS;
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006634 }
6635
6636 if (chan->imtu < skb->len) {
6637 BT_ERR("Too big LE L2CAP PDU");
6638 return -ENOBUFS;
6639 }
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006640
6641 chan->rx_credits--;
6642 BT_DBG("rx_credits %u -> %u", chan->rx_credits + 1, chan->rx_credits);
6643
6644 l2cap_chan_le_send_credits(chan);
6645
Johan Hedbergaac23bf2013-06-01 10:14:57 +03006646 err = 0;
6647
6648 if (!chan->sdu) {
6649 u16 sdu_len;
6650
6651 sdu_len = get_unaligned_le16(skb->data);
6652 skb_pull(skb, L2CAP_SDULEN_SIZE);
6653
6654 BT_DBG("Start of new SDU. sdu_len %u skb->len %u imtu %u",
6655 sdu_len, skb->len, chan->imtu);
6656
6657 if (sdu_len > chan->imtu) {
6658 BT_ERR("Too big LE L2CAP SDU length received");
6659 err = -EMSGSIZE;
6660 goto failed;
6661 }
6662
6663 if (skb->len > sdu_len) {
6664 BT_ERR("Too much LE L2CAP data received");
6665 err = -EINVAL;
6666 goto failed;
6667 }
6668
6669 if (skb->len == sdu_len)
6670 return chan->ops->recv(chan, skb);
6671
6672 chan->sdu = skb;
6673 chan->sdu_len = sdu_len;
6674 chan->sdu_last_frag = skb;
6675
6676 return 0;
6677 }
6678
6679 BT_DBG("SDU fragment. chan->sdu->len %u skb->len %u chan->sdu_len %u",
6680 chan->sdu->len, skb->len, chan->sdu_len);
6681
6682 if (chan->sdu->len + skb->len > chan->sdu_len) {
6683 BT_ERR("Too much LE L2CAP data received");
6684 err = -EINVAL;
6685 goto failed;
6686 }
6687
6688 append_skb_frag(chan->sdu, skb, &chan->sdu_last_frag);
6689 skb = NULL;
6690
6691 if (chan->sdu->len == chan->sdu_len) {
6692 err = chan->ops->recv(chan, chan->sdu);
6693 if (!err) {
6694 chan->sdu = NULL;
6695 chan->sdu_last_frag = NULL;
6696 chan->sdu_len = 0;
6697 }
6698 }
6699
6700failed:
6701 if (err) {
6702 kfree_skb(skb);
6703 kfree_skb(chan->sdu);
6704 chan->sdu = NULL;
6705 chan->sdu_last_frag = NULL;
6706 chan->sdu_len = 0;
6707 }
6708
6709 /* We can't return an error here since we took care of the skb
6710 * freeing internally. An error return would cause the caller to
6711 * do a double-free of the skb.
6712 */
6713 return 0;
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006714}
6715
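/* A hedged sketch of the LE credit-based reassembly handled above: a
 * 60-byte SDU sent over a channel with an MPS of 23 arrives as three
 * PDUs, costing three credits.  The first carries the 2-byte SDU length
 * (60) plus 21 bytes of data, the second another 23 bytes and the last
 * the remaining 16; only when chan->sdu->len reaches sdu_len is the
 * complete SDU handed to chan->ops->recv().
 */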
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006716static void l2cap_data_channel(struct l2cap_conn *conn, u16 cid,
6717 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006718{
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006719 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006720
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -03006721 chan = l2cap_get_chan_by_scid(conn, cid);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03006722 if (!chan) {
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006723 if (cid == L2CAP_CID_A2MP) {
6724 chan = a2mp_channel_create(conn, skb);
6725 if (!chan) {
6726 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006727 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006728 }
6729
6730 l2cap_chan_lock(chan);
6731 } else {
6732 BT_DBG("unknown cid 0x%4.4x", cid);
6733 /* Drop packet and return */
6734 kfree_skb(skb);
Andrei Emeltchenko13ca56e2012-05-31 11:18:55 +03006735 return;
Andrei Emeltchenko97e8e892012-05-29 13:59:17 +03006736 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006737 }
6738
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03006739 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006740
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03006741 if (chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006742 goto drop;
6743
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006744 switch (chan->mode) {
Johan Hedberg38319712013-05-17 12:49:23 +03006745 case L2CAP_MODE_LE_FLOWCTL:
Johan Hedbergfad5fc82013-12-05 09:45:01 +02006746 if (l2cap_le_data_rcv(chan, skb) < 0)
6747 goto drop;
6748
6749 goto done;
6750
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006751 case L2CAP_MODE_BASIC:
6752 /* If socket recv buffers overflows we drop data here
6753 * which is *bad* because L2CAP has to be reliable.
6754 * But we don't have any other choice. L2CAP doesn't
6755 * provide flow control mechanism. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07006756
Szymon Janc2c96e032014-02-18 20:48:34 +01006757 if (chan->imtu < skb->len) {
6758 BT_ERR("Dropping L2CAP data: receive buffer overflow");
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006759 goto drop;
Szymon Janc2c96e032014-02-18 20:48:34 +01006760 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006761
Gustavo Padovan80b98022012-05-27 22:27:51 -03006762 if (!chan->ops->recv(chan, skb))
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006763 goto done;
6764 break;
6765
6766 case L2CAP_MODE_ERTM:
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006767 case L2CAP_MODE_STREAMING:
Mat Martineaucec8ab6e2012-05-17 20:53:36 -07006768 l2cap_data_rcv(chan, skb);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006769 goto done;
6770
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006771 default:
Gustavo F. Padovan0c1bc5c2011-04-13 17:20:49 -03006772 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006773 break;
6774 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006775
6776drop:
6777 kfree_skb(skb);
6778
6779done:
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02006780 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006781}
6782
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006783static void l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm,
6784 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006785{
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006786 struct hci_conn *hcon = conn->hcon;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006787 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006788
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006789 if (hcon->type != ACL_LINK)
Johan Hedberga24cce12014-08-07 22:56:42 +03006790 goto free_skb;
Marcel Holtmannae4fd2d2013-10-03 00:03:39 -07006791
Johan Hedbergbf20fd42013-05-14 13:23:13 +03006792 chan = l2cap_global_chan_by_psm(0, psm, &hcon->src, &hcon->dst,
6793 ACL_LINK);
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03006794 if (!chan)
Johan Hedberga24cce12014-08-07 22:56:42 +03006795 goto free_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006796
Andrei Emeltchenko5b4ceda2012-02-24 16:35:32 +02006797 BT_DBG("chan %p, len %d", chan, skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006798
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03006799 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006800 goto drop;
6801
Vinicius Costa Gomese13e21d2011-06-17 22:46:27 -03006802 if (chan->imtu < skb->len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006803 goto drop;
6804
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006805 /* Store remote BD_ADDR and PSM for msg_name */
Marcel Holtmann06ae3312013-10-18 03:43:00 -07006806 bacpy(&bt_cb(skb)->bdaddr, &hcon->dst);
Marcel Holtmann2edf8702013-10-13 12:55:29 -07006807 bt_cb(skb)->psm = psm;
6808
Johan Hedberga24cce12014-08-07 22:56:42 +03006809 if (!chan->ops->recv(chan, skb)) {
6810 l2cap_chan_put(chan);
Andrei Emeltchenko84104b22012-05-31 11:18:56 +03006811 return;
Johan Hedberga24cce12014-08-07 22:56:42 +03006812 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006813
6814drop:
Johan Hedberga24cce12014-08-07 22:56:42 +03006815 l2cap_chan_put(chan);
6816free_skb:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006817 kfree_skb(skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006818}
6819
6820static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
6821{
6822 struct l2cap_hdr *lh = (void *) skb->data;
Johan Hedberg61a939c2014-01-17 20:45:11 +02006823 struct hci_conn *hcon = conn->hcon;
Al Viro8e036fc2007-07-29 00:16:36 -07006824 u16 cid, len;
6825 __le16 psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006826
Johan Hedberg61a939c2014-01-17 20:45:11 +02006827 if (hcon->state != BT_CONNECTED) {
6828 BT_DBG("queueing pending rx skb");
6829 skb_queue_tail(&conn->pending_rx, skb);
6830 return;
6831 }
6832
Linus Torvalds1da177e2005-04-16 15:20:36 -07006833 skb_pull(skb, L2CAP_HDR_SIZE);
6834 cid = __le16_to_cpu(lh->cid);
6835 len = __le16_to_cpu(lh->len);
6836
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006837 if (len != skb->len) {
6838 kfree_skb(skb);
6839 return;
6840 }
6841
Johan Hedberg9e1d7e12014-07-06 11:03:36 +03006842 /* Since we can't actively block incoming LE connections we must
6843 * at least ensure that we ignore incoming data from them.
6844 */
6845 if (hcon->type == LE_LINK &&
Johan Hedbergdcc36c12014-07-09 12:59:13 +03006846 hci_bdaddr_list_lookup(&hcon->hdev->blacklist, &hcon->dst,
6847 bdaddr_type(hcon, hcon->dst_type))) {
Johan Hedberge4931502014-07-02 09:36:21 +03006848 kfree_skb(skb);
6849 return;
6850 }
6851
Linus Torvalds1da177e2005-04-16 15:20:36 -07006852 BT_DBG("len %d, cid 0x%4.4x", len, cid);
6853
6854 switch (cid) {
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03006855 case L2CAP_CID_SIGNALING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07006856 l2cap_sig_channel(conn, skb);
6857 break;
6858
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03006859 case L2CAP_CID_CONN_LESS:
Andrei Emeltchenko097db762012-03-09 14:16:17 +02006860 psm = get_unaligned((__le16 *) skb->data);
Andrei Emeltchenko0181a702012-05-29 10:04:05 +03006861 skb_pull(skb, L2CAP_PSMLEN_SIZE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07006862 l2cap_conless_channel(conn, psm, skb);
6863 break;
6864
Marcel Holtmanna2877622013-10-02 23:46:54 -07006865 case L2CAP_CID_LE_SIGNALING:
6866 l2cap_le_sig_channel(conn, skb);
6867 break;
6868
Linus Torvalds1da177e2005-04-16 15:20:36 -07006869 default:
6870 l2cap_data_channel(conn, cid, skb);
6871 break;
6872 }
6873}
6874
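/* A small worked example of the demultiplexing above: the basic L2CAP
 * header is 4 bytes, length then CID, both little endian, so a frame
 * beginning 0x07 0x00 0x01 0x00 is a 7-byte payload on CID 0x0001
 * (L2CAP_CID_SIGNALING) and goes to l2cap_sig_channel().  CID 0x0002
 * (L2CAP_CID_CONN_LESS) is routed through l2cap_conless_channel() after
 * the 2-byte PSM is read, CID 0x0005 (L2CAP_CID_LE_SIGNALING) to
 * l2cap_le_sig_channel(), and everything else to l2cap_data_channel().
 */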
Johan Hedberg61a939c2014-01-17 20:45:11 +02006875static void process_pending_rx(struct work_struct *work)
6876{
6877 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
6878 pending_rx_work);
6879 struct sk_buff *skb;
6880
6881 BT_DBG("");
6882
6883 while ((skb = skb_dequeue(&conn->pending_rx)))
6884 l2cap_recv_frame(conn, skb);
6885}
6886
Johan Hedberg162b49e2014-01-17 20:45:10 +02006887static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon)
6888{
6889 struct l2cap_conn *conn = hcon->l2cap_data;
6890 struct hci_chan *hchan;
6891
6892 if (conn)
6893 return conn;
6894
6895 hchan = hci_chan_create(hcon);
6896 if (!hchan)
6897 return NULL;
6898
Johan Hedberg27f70f32014-07-21 10:50:06 +03006899 conn = kzalloc(sizeof(*conn), GFP_KERNEL);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006900 if (!conn) {
6901 hci_chan_del(hchan);
6902 return NULL;
6903 }
6904
6905 kref_init(&conn->ref);
6906 hcon->l2cap_data = conn;
6907 conn->hcon = hcon;
6908 hci_conn_get(conn->hcon);
6909 conn->hchan = hchan;
6910
6911 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
6912
6913 switch (hcon->type) {
6914 case LE_LINK:
6915 if (hcon->hdev->le_mtu) {
6916 conn->mtu = hcon->hdev->le_mtu;
6917 break;
6918 }
6919 /* fall through */
6920 default:
6921 conn->mtu = hcon->hdev->acl_mtu;
6922 break;
6923 }
6924
6925 conn->feat_mask = 0;
6926
6927 if (hcon->type == ACL_LINK)
6928 conn->hs_enabled = test_bit(HCI_HS_ENABLED,
6929 &hcon->hdev->dev_flags);
6930
Marcel Holtmann5a54e7c2014-07-13 20:50:15 +02006931 mutex_init(&conn->ident_lock);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006932 mutex_init(&conn->chan_lock);
6933
6934 INIT_LIST_HEAD(&conn->chan_l);
6935 INIT_LIST_HEAD(&conn->users);
6936
Johan Hedberg276d8072014-08-11 22:06:41 +03006937 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
Johan Hedberg162b49e2014-01-17 20:45:10 +02006938
Johan Hedbergdec5b492014-08-11 22:06:37 +03006939 INIT_WORK(&conn->disconn_work, disconn_work);
6940
Johan Hedberg61a939c2014-01-17 20:45:11 +02006941 skb_queue_head_init(&conn->pending_rx);
6942 INIT_WORK(&conn->pending_rx_work, process_pending_rx);
6943
Johan Hedberg162b49e2014-01-17 20:45:10 +02006944 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
6945
6946 return conn;
6947}
6948
6949static bool is_valid_psm(u16 psm, u8 dst_type)
{
6950 if (!psm)
6951 return false;
6952
6953 if (bdaddr_type_is_le(dst_type))
6954 return (psm <= 0x00ff);
6955
6956 /* PSM must be odd and lsb of upper byte must be 0 */
6957 return ((psm & 0x0101) == 0x0001);
6958}
6959
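/* Examples of the check above: for BR/EDR, PSMs 0x0001 and 0x1003
 * satisfy (psm & 0x0101) == 0x0001 (odd, with bit 0 of the upper octet
 * clear) and are accepted, while 0x0002 (even) and 0x0103 (upper octet
 * lsb set) are rejected.  For an LE destination any non-zero PSM up to
 * 0x00ff is allowed.
 */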
6960int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid,
6961 bdaddr_t *dst, u8 dst_type)
6962{
6963 struct l2cap_conn *conn;
6964 struct hci_conn *hcon;
6965 struct hci_dev *hdev;
Johan Hedberg162b49e2014-01-17 20:45:10 +02006966 int err;
6967
6968 BT_DBG("%pMR -> %pMR (type %u) psm 0x%2.2x", &chan->src, dst,
6969 dst_type, __le16_to_cpu(psm));
6970
6971 hdev = hci_get_route(dst, &chan->src);
6972 if (!hdev)
6973 return -EHOSTUNREACH;
6974
6975 hci_dev_lock(hdev);
6976
6977 l2cap_chan_lock(chan);
6978
6979 if (!is_valid_psm(__le16_to_cpu(psm), dst_type) && !cid &&
6980 chan->chan_type != L2CAP_CHAN_RAW) {
6981 err = -EINVAL;
6982 goto done;
6983 }
6984
Johan Hedberg21626e62014-01-24 10:35:41 +02006985 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !psm) {
6986 err = -EINVAL;
6987 goto done;
6988 }
6989
6990 if (chan->chan_type == L2CAP_CHAN_FIXED && !cid) {
Johan Hedberg162b49e2014-01-17 20:45:10 +02006991 err = -EINVAL;
6992 goto done;
6993 }
6994
6995 switch (chan->mode) {
6996 case L2CAP_MODE_BASIC:
6997 break;
6998 case L2CAP_MODE_LE_FLOWCTL:
6999 l2cap_le_flowctl_init(chan);
7000 break;
7001 case L2CAP_MODE_ERTM:
7002 case L2CAP_MODE_STREAMING:
7003 if (!disable_ertm)
7004 break;
7005 /* fall through */
7006 default:
Johan Hedbergbeb19e42014-07-18 11:15:26 +03007007 err = -EOPNOTSUPP;
Johan Hedberg162b49e2014-01-17 20:45:10 +02007008 goto done;
7009 }
7010
7011 switch (chan->state) {
7012 case BT_CONNECT:
7013 case BT_CONNECT2:
7014 case BT_CONFIG:
7015 /* Already connecting */
7016 err = 0;
7017 goto done;
7018
7019 case BT_CONNECTED:
7020 /* Already connected */
7021 err = -EISCONN;
7022 goto done;
7023
7024 case BT_OPEN:
7025 case BT_BOUND:
7026 /* Can connect */
7027 break;
7028
7029 default:
7030 err = -EBADFD;
7031 goto done;
7032 }
7033
7034 /* Set destination address and psm */
7035 bacpy(&chan->dst, dst);
7036 chan->dst_type = dst_type;
7037
7038 chan->psm = psm;
7039 chan->dcid = cid;
7040
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007041 if (bdaddr_type_is_le(dst_type)) {
Johan Hedberge804d252014-07-16 11:42:28 +03007042 u8 role;
Johan Hedbergcdd62752014-07-07 15:02:28 +03007043
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007044 /* Convert from L2CAP channel address type to HCI address type
7045 */
7046 if (dst_type == BDADDR_LE_PUBLIC)
7047 dst_type = ADDR_LE_DEV_PUBLIC;
7048 else
7049 dst_type = ADDR_LE_DEV_RANDOM;
7050
Johan Hedberge804d252014-07-16 11:42:28 +03007051 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
7052 role = HCI_ROLE_SLAVE;
7053 else
7054 role = HCI_ROLE_MASTER;
Johan Hedbergcdd62752014-07-07 15:02:28 +03007055
Andre Guedes04a6c582014-02-26 20:21:44 -03007056 hcon = hci_connect_le(hdev, dst, dst_type, chan->sec_level,
Johan Hedberge804d252014-07-16 11:42:28 +03007057 HCI_LE_CONN_TIMEOUT, role);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007058 } else {
Johan Hedbergd93375a2014-07-07 15:02:27 +03007059 u8 auth_type = l2cap_get_auth_type(chan);
Andre Guedes04a6c582014-02-26 20:21:44 -03007060 hcon = hci_connect_acl(hdev, dst, chan->sec_level, auth_type);
Andre Guedes6f77d8c2014-02-26 20:21:45 -03007061 }
Johan Hedberg162b49e2014-01-17 20:45:10 +02007062
7063 if (IS_ERR(hcon)) {
7064 err = PTR_ERR(hcon);
7065 goto done;
7066 }
7067
7068 conn = l2cap_conn_add(hcon);
7069 if (!conn) {
7070 hci_conn_drop(hcon);
7071 err = -ENOMEM;
7072 goto done;
7073 }
7074
7075 if (cid && __l2cap_get_chan_by_dcid(conn, cid)) {
7076 hci_conn_drop(hcon);
7077 err = -EBUSY;
7078 goto done;
7079 }
7080
7081 /* Update source addr of the socket */
7082 bacpy(&chan->src, &hcon->src);
7083 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7084
7085 l2cap_chan_unlock(chan);
7086 l2cap_chan_add(conn, chan);
7087 l2cap_chan_lock(chan);
7088
7089 /* l2cap_chan_add takes its own ref so we can drop this one */
7090 hci_conn_drop(hcon);
7091
7092 l2cap_state_change(chan, BT_CONNECT);
7093 __set_chan_timer(chan, chan->ops->get_sndtimeo(chan));
7094
Johan Hedberg61202e42014-01-28 15:16:48 -08007095 /* Release chan->sport so that it can be reused by other
7096 * sockets (as it's only used for listening sockets).
7097 */
7098 write_lock(&chan_list_lock);
7099 chan->sport = 0;
7100 write_unlock(&chan_list_lock);
7101
Johan Hedberg162b49e2014-01-17 20:45:10 +02007102 if (hcon->state == BT_CONNECTED) {
7103 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
7104 __clear_chan_timer(chan);
Johan Hedberge7cafc42014-07-17 15:35:38 +03007105 if (l2cap_chan_check_security(chan, true))
Johan Hedberg162b49e2014-01-17 20:45:10 +02007106 l2cap_state_change(chan, BT_CONNECTED);
7107 } else
7108 l2cap_do_start(chan);
7109 }
7110
7111 err = 0;
7112
7113done:
7114 l2cap_chan_unlock(chan);
7115 hci_dev_unlock(hdev);
7116 hci_dev_put(hdev);
7117 return err;
7118}
Jukka Rissanen6b8d4a62014-06-18 16:37:08 +03007119EXPORT_SYMBOL_GPL(l2cap_chan_connect);
Johan Hedberg162b49e2014-01-17 20:45:10 +02007120
Linus Torvalds1da177e2005-04-16 15:20:36 -07007121/* ---- L2CAP interface with lower layer (HCI) ---- */
7122
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007123int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007124{
7125 int exact = 0, lm1 = 0, lm2 = 0;
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007126 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007127
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007128 BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007129
7130 /* Find listening sockets and check their link_mode */
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007131 read_lock(&chan_list_lock);
7132 list_for_each_entry(c, &chan_list, global_l) {
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007133 if (c->state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007134 continue;
7135
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007136 if (!bacmp(&c->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007137 lm1 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007138 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007139 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007140 exact++;
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007141 } else if (!bacmp(&c->src, BDADDR_ANY)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007142 lm2 |= HCI_LM_ACCEPT;
Andrei Emeltchenko43bd0f32011-10-11 14:04:34 +03007143 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007144 lm2 |= HCI_LM_MASTER;
7145 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007146 }
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007147 read_unlock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007148
7149 return exact ? lm1 : lm2;
7150}
7151
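/* For example, with one channel listening on the local adapter address
 * with FLAG_ROLE_SWITCH set and another listening on BDADDR_ANY without
 * it, the exact match takes precedence and l2cap_connect_ind() returns
 * HCI_LM_ACCEPT | HCI_LM_MASTER for the incoming connection.
 */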
Johan Hedberge760ec12014-08-07 22:56:47 +03007152/* Find the next fixed channel in BT_LISTEN state, continuing iteration
7153 * either from an existing channel in the list or from the beginning of
7154 * the global list (by passing NULL as the first parameter).
7155 */
7156static struct l2cap_chan *l2cap_global_fixed_chan(struct l2cap_chan *c,
Johan Hedberg54a1b622014-08-07 22:56:48 +03007157 bdaddr_t *src, u8 link_type)
Johan Hedberge760ec12014-08-07 22:56:47 +03007158{
7159 read_lock(&chan_list_lock);
7160
7161 if (c)
7162 c = list_next_entry(c, global_l);
7163 else
7164 c = list_entry(chan_list.next, typeof(*c), global_l);
7165
7166 list_for_each_entry_from(c, &chan_list, global_l) {
7167 if (c->chan_type != L2CAP_CHAN_FIXED)
7168 continue;
7169 if (c->state != BT_LISTEN)
7170 continue;
7171 if (bacmp(&c->src, src) && bacmp(&c->src, BDADDR_ANY))
7172 continue;
Johan Hedberg54a1b622014-08-07 22:56:48 +03007173 if (link_type == ACL_LINK && c->src_type != BDADDR_BREDR)
7174 continue;
7175 if (link_type == LE_LINK && c->src_type == BDADDR_BREDR)
7176 continue;
Johan Hedberge760ec12014-08-07 22:56:47 +03007177
7178 l2cap_chan_hold(c);
7179 read_unlock(&chan_list_lock);
7180 return c;
7181 }
7182
7183 read_unlock(&chan_list_lock);
7184
7185 return NULL;
7186}
7187
Andrei Emeltchenko9e664632012-07-24 16:06:15 +03007188void l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007189{
Johan Hedberge760ec12014-08-07 22:56:47 +03007190 struct hci_dev *hdev = hcon->hdev;
Marcel Holtmann01394182006-07-03 10:02:46 +02007191 struct l2cap_conn *conn;
Johan Hedberge760ec12014-08-07 22:56:47 +03007192 struct l2cap_chan *pchan;
7193 u8 dst_type;
Marcel Holtmann01394182006-07-03 10:02:46 +02007194
Andrei Emeltchenko6ed93dc2012-09-25 12:49:43 +03007195 BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007196
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007197 if (status) {
Joe Perchese1750722011-06-29 18:18:29 -07007198 l2cap_conn_del(hcon, bt_to_errno(status));
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007199 return;
Andrei Emeltchenkoba6fc312012-10-31 15:46:26 +02007200 }
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007201
7202 conn = l2cap_conn_add(hcon);
7203 if (!conn)
7204 return;
7205
Johan Hedberge760ec12014-08-07 22:56:47 +03007206 dst_type = bdaddr_type(hcon, hcon->dst_type);
7207
7208 /* If device is blocked, do not create channels for it */
7209 if (hci_bdaddr_list_lookup(&hdev->blacklist, &hcon->dst, dst_type))
7210 return;
7211
7212 /* Find fixed channels and notify them of the new connection. We
7213 * use multiple individual lookups, continuing each time where
7214 * we left off, because the list lock would prevent calling the
7215 * potentially sleeping l2cap_chan_lock() function.
7216 */
Johan Hedberg54a1b622014-08-07 22:56:48 +03007217 pchan = l2cap_global_fixed_chan(NULL, &hdev->bdaddr, hcon->type);
Johan Hedberge760ec12014-08-07 22:56:47 +03007218 while (pchan) {
7219 struct l2cap_chan *chan, *next;
7220
7221 /* Client fixed channels should override server ones */
7222 if (__l2cap_get_chan_by_dcid(conn, pchan->scid))
7223 goto next;
7224
7225 l2cap_chan_lock(pchan);
7226 chan = pchan->ops->new_connection(pchan);
7227 if (chan) {
7228 bacpy(&chan->src, &hcon->src);
7229 bacpy(&chan->dst, &hcon->dst);
7230 chan->src_type = bdaddr_type(hcon, hcon->src_type);
7231 chan->dst_type = dst_type;
7232
7233 __l2cap_chan_add(conn, chan);
7234 }
7235
7236 l2cap_chan_unlock(pchan);
7237next:
Johan Hedberg54a1b622014-08-07 22:56:48 +03007238 next = l2cap_global_fixed_chan(pchan, &hdev->bdaddr,
7239 hcon->type);
Johan Hedberge760ec12014-08-07 22:56:47 +03007240 l2cap_chan_put(pchan);
7241 pchan = next;
7242 }
7243
Johan Hedbergdc0f5082014-08-07 22:56:46 +03007244 l2cap_conn_ready(conn);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007245}
7246
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007247int l2cap_disconn_ind(struct hci_conn *hcon)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007248{
7249 struct l2cap_conn *conn = hcon->l2cap_data;
7250
7251 BT_DBG("hcon %p", hcon);
7252
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007253 if (!conn)
Andrei Emeltchenko9f5a0d72011-11-07 14:20:25 +02007254 return HCI_ERROR_REMOTE_USER_TERM;
Marcel Holtmann2950f212009-02-12 14:02:50 +01007255 return conn->disc_reason;
7256}
7257
Andrei Emeltchenko9e664632012-07-24 16:06:15 +03007258void l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007259{
7260 BT_DBG("hcon %p reason %d", hcon, reason);
7261
Joe Perchese1750722011-06-29 18:18:29 -07007262 l2cap_conn_del(hcon, bt_to_errno(reason));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007263}
7264
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007265static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007266{
Gustavo F. Padovan715ec002011-05-02 17:13:55 -03007267 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007268 return;
7269
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007270 if (encrypt == 0x00) {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007271 if (chan->sec_level == BT_SECURITY_MEDIUM) {
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007272 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
Marcel Holtmann7d513e92014-01-15 22:37:40 -08007273 } else if (chan->sec_level == BT_SECURITY_HIGH ||
7274 chan->sec_level == BT_SECURITY_FIPS)
Gustavo F. Padovan0f852722011-05-04 19:42:50 -03007275 l2cap_chan_close(chan, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007276 } else {
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007277 if (chan->sec_level == BT_SECURITY_MEDIUM)
Gustavo F. Padovanc9b66672011-05-17 14:59:01 -03007278 __clear_chan_timer(chan);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007279 }
7280}
7281
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007282int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007283{
Marcel Holtmann40be4922008-07-14 20:13:50 +02007284 struct l2cap_conn *conn = hcon->l2cap_data;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03007285 struct l2cap_chan *chan;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007286
Marcel Holtmann01394182006-07-03 10:02:46 +02007287 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007288 return 0;
Marcel Holtmann01394182006-07-03 10:02:46 +02007289
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007290 BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007291
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007292 mutex_lock(&conn->chan_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007293
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007294 list_for_each_entry(chan, &conn->chan_l, list) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007295 l2cap_chan_lock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007296
Andrei Emeltchenko89d8b402012-07-10 15:27:51 +03007297 BT_DBG("chan %p scid 0x%4.4x state %s", chan, chan->scid,
7298 state_to_string(chan->state));
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007299
Johan Hedberg2338a7e2014-01-24 10:35:40 +02007300 if (chan->scid == L2CAP_CID_A2MP) {
Andrei Emeltchenko78eb2f92012-07-19 17:03:47 +03007301 l2cap_chan_unlock(chan);
7302 continue;
7303 }
7304
Johan Hedberg191eb392014-08-07 22:56:45 +03007305 if (!status && encrypt)
7306 chan->sec_level = hcon->sec_level;
Vinicius Costa Gomesf1cb9af2011-01-26 21:42:57 -03007307
Andrei Emeltchenko96eff462012-11-15 18:14:53 +02007308 if (!__l2cap_no_conn_pending(chan)) {
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007309 l2cap_chan_unlock(chan);
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01007310 continue;
7311 }
7312
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007313 if (!status && (chan->state == BT_CONNECTED ||
Gustavo Padovan2d792812012-10-06 10:07:01 +01007314 chan->state == BT_CONFIG)) {
Marcel Holtmannd97c8992013-10-14 02:53:54 -07007315 chan->ops->resume(chan);
Gustavo F. Padovan43434782011-04-12 18:31:57 -03007316 l2cap_check_encryption(chan, encrypt);
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007317 l2cap_chan_unlock(chan);
Marcel Holtmann9719f8a2008-07-14 20:13:45 +02007318 continue;
7319 }
7320
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007321 if (chan->state == BT_CONNECT) {
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007322 if (!status)
Andrei Emeltchenko93c3e8f2012-09-27 17:26:16 +03007323 l2cap_start_connection(chan);
Johan Hedberg6d3c15d2013-12-02 22:13:24 +02007324 else
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007325 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Gustavo F. Padovan89bc500e2011-06-03 00:19:47 -03007326 } else if (chan->state == BT_CONNECT2) {
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007327 struct l2cap_conn_rsp rsp;
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007328 __u16 res, stat;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007329
7330 if (!status) {
Marcel Holtmannbdc25782013-10-14 02:45:34 -07007331 if (test_bit(FLAG_DEFER_SETUP, &chan->flags)) {
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007332 res = L2CAP_CR_PEND;
7333 stat = L2CAP_CS_AUTHOR_PEND;
Gustavo Padovan2dc4e512012-10-12 19:35:24 +08007334 chan->ops->defer(chan);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007335 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007336 l2cap_state_change(chan, BT_CONFIG);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007337 res = L2CAP_CR_SUCCESS;
7338 stat = L2CAP_CS_NO_INFO;
7339 }
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007340 } else {
Gustavo Padovanacdcabf2013-10-21 14:21:39 -02007341 l2cap_state_change(chan, BT_DISCONN);
Marcel Holtmannba13ccd2012-03-01 14:25:33 -08007342 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007343 res = L2CAP_CR_SEC_BLOCK;
7344 stat = L2CAP_CS_NO_INFO;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007345 }
7346
Gustavo F. Padovanfe4128e2011-04-13 19:50:45 -03007347 rsp.scid = cpu_to_le16(chan->dcid);
7348 rsp.dcid = cpu_to_le16(chan->scid);
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007349 rsp.result = cpu_to_le16(res);
7350 rsp.status = cpu_to_le16(stat);
Gustavo F. Padovanfc7f8a72011-03-25 13:59:37 -03007351 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
Gustavo Padovan2d792812012-10-06 10:07:01 +01007352 sizeof(rsp), &rsp);
Mat Martineau2d369352012-05-23 14:59:30 -07007353
7354 if (!test_bit(CONF_REQ_SENT, &chan->conf_state) &&
7355 res == L2CAP_CR_SUCCESS) {
7356 char buf[128];
7357 set_bit(CONF_REQ_SENT, &chan->conf_state);
7358 l2cap_send_cmd(conn, l2cap_get_ident(conn),
7359 L2CAP_CONF_REQ,
7360 l2cap_build_conf_req(chan, buf),
7361 buf);
7362 chan->num_conf_req++;
7363 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007364 }
7365
Andrei Emeltchenko6be36552012-02-22 17:11:56 +02007366 l2cap_chan_unlock(chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007367 }
7368
Andrei Emeltchenko3df91ea2012-02-21 12:54:55 +02007369 mutex_unlock(&conn->chan_lock);
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007370
Linus Torvalds1da177e2005-04-16 15:20:36 -07007371 return 0;
7372}
7373
Ulisses Furquim686ebf22011-12-21 10:11:33 -02007374int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007375{
7376 struct l2cap_conn *conn = hcon->l2cap_data;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007377 struct l2cap_hdr *hdr;
7378 int len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007379
Andrei Emeltchenko1d13a252012-10-15 11:58:41 +03007380 /* For AMP controller do not create l2cap conn */
7381 if (!conn && hcon->hdev->dev_type != HCI_BREDR)
7382 goto drop;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007383
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007384 if (!conn)
Claudio Takahasibaf43252013-04-11 13:55:50 -03007385 conn = l2cap_conn_add(hcon);
Andrei Emeltchenko5a08ecc2011-01-11 17:20:20 +02007386
7387 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007388 goto drop;
7389
7390 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
7391
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007392 switch (flags) {
7393 case ACL_START:
7394 case ACL_START_NO_FLUSH:
7395 case ACL_COMPLETE:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007396 if (conn->rx_len) {
7397 BT_ERR("Unexpected start frame (len %d)", skb->len);
7398 kfree_skb(conn->rx_skb);
7399 conn->rx_skb = NULL;
7400 conn->rx_len = 0;
7401 l2cap_conn_unreliable(conn, ECOMM);
7402 }
7403
Andrei Emeltchenkoaae7fe22010-09-15 14:28:43 +03007404 /* Start fragment always begin with Basic L2CAP header */
7405 if (skb->len < L2CAP_HDR_SIZE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007406 BT_ERR("Frame is too short (len %d)", skb->len);
7407 l2cap_conn_unreliable(conn, ECOMM);
7408 goto drop;
7409 }
7410
7411 hdr = (struct l2cap_hdr *) skb->data;
7412 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
7413
7414 if (len == skb->len) {
7415 /* Complete frame received */
7416 l2cap_recv_frame(conn, skb);
7417 return 0;
7418 }
7419
7420 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
7421
7422 if (skb->len > len) {
7423 BT_ERR("Frame is too long (len %d, expected len %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007424 skb->len, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007425 l2cap_conn_unreliable(conn, ECOMM);
7426 goto drop;
7427 }
7428
7429 /* Allocate skb for the complete frame (with header) */
Gustavo Padovan8bcde1f2012-05-28 19:18:14 -03007430 conn->rx_skb = bt_skb_alloc(len, GFP_KERNEL);
Gustavo F. Padovanaf05b30b2009-04-20 01:31:08 -03007431 if (!conn->rx_skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007432 goto drop;
7433
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007434 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007435 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007436 conn->rx_len = len - skb->len;
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007437 break;
7438
7439 case ACL_CONT:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007440 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
7441
7442 if (!conn->rx_len) {
7443 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
7444 l2cap_conn_unreliable(conn, ECOMM);
7445 goto drop;
7446 }
7447
7448 if (skb->len > conn->rx_len) {
7449 BT_ERR("Fragment is too long (len %d, expected %d)",
Gustavo Padovan2d792812012-10-06 10:07:01 +01007450 skb->len, conn->rx_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007451 kfree_skb(conn->rx_skb);
7452 conn->rx_skb = NULL;
7453 conn->rx_len = 0;
7454 l2cap_conn_unreliable(conn, ECOMM);
7455 goto drop;
7456 }
7457
Arnaldo Carvalho de Melod626f622007-03-27 18:55:52 -03007458 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
Gustavo Padovan2d792812012-10-06 10:07:01 +01007459 skb->len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007460 conn->rx_len -= skb->len;
7461
7462 if (!conn->rx_len) {
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007463 /* Complete frame received. l2cap_recv_frame
7464			 * takes ownership of the skb, so set the per-connection
7465			 * rx_skb pointer to NULL first.
7466 */
7467 struct sk_buff *rx_skb = conn->rx_skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007468 conn->rx_skb = NULL;
Johan Hedbergc4e5baf2013-10-10 13:33:37 +02007469 l2cap_recv_frame(conn, rx_skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007470 }
Andrei Emeltchenkod73a0982012-10-15 11:58:40 +03007471 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007472 }
7473
7474drop:
7475 kfree_skb(skb);
7476 return 0;
7477}
7478
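/* Reassembly in l2cap_recv_acldata() as a worked example: an L2CAP frame
 * whose basic header announces len = 296 occupies 300 bytes including
 * the 4-byte header.  If the controller delivers it as an ACL_START
 * fragment of 128 bytes followed by ACL_CONT fragments of 128 and 44
 * bytes, conn->rx_len goes 300 - 128 = 172, then 44, then 0, at which
 * point the completed conn->rx_skb is handed to l2cap_recv_frame().
 */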
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007479static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007480{
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007481 struct l2cap_chan *c;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007482
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007483 read_lock(&chan_list_lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007484
Gustavo F. Padovan23691d72011-04-27 18:26:32 -03007485 list_for_each_entry(c, &chan_list, global_l) {
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007486 seq_printf(f, "%pMR %pMR %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmann7eafc592013-10-13 08:12:47 -07007487 &c->src, &c->dst,
Andrei Emeltchenkofcb73332012-09-25 12:49:44 +03007488 c->state, __le16_to_cpu(c->psm),
7489 c->scid, c->dcid, c->imtu, c->omtu,
7490 c->sec_level, c->mode);
Andrei Emeltchenko61e1b4b2012-01-19 11:19:50 +02007491 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007492
Gustavo F. Padovan333055f2011-12-22 15:14:39 -02007493 read_unlock(&chan_list_lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007494
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007495 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007496}
7497
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007498static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7499{
7500 return single_open(file, l2cap_debugfs_show, inode->i_private);
7501}
7502
7503static const struct file_operations l2cap_debugfs_fops = {
7504 .open = l2cap_debugfs_open,
7505 .read = seq_read,
7506 .llseek = seq_lseek,
7507 .release = single_release,
7508};
7509
7510static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007511
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007512int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007513{
7514 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007515
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007516 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007517 if (err < 0)
7518 return err;
7519
Marcel Holtmann1120e4b2013-10-17 17:24:16 -07007520 if (IS_ERR_OR_NULL(bt_debugfs))
7521 return 0;
7522
7523 l2cap_debugfs = debugfs_create_file("l2cap", 0444, bt_debugfs,
7524 NULL, &l2cap_debugfs_fops);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007525
Samuel Ortiz40b93972014-05-14 17:53:35 +02007526 debugfs_create_u16("l2cap_le_max_credits", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007527 &le_max_credits);
Samuel Ortiz40b93972014-05-14 17:53:35 +02007528 debugfs_create_u16("l2cap_le_default_mps", 0644, bt_debugfs,
Johan Hedbergf15b8ec2013-12-03 15:08:25 +02007529 &le_default_mps);
7530
Linus Torvalds1da177e2005-04-16 15:20:36 -07007531 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007532}
7533
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007534void l2cap_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007535{
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007536 debugfs_remove(l2cap_debugfs);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007537 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007538}
7539
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03007540module_param(disable_ertm, bool, 0644);
7541MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");