blob: db8f3f511b4d2cf4e884c75263ed1dbe81b78d4a [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Gustavo F. Padovance5706b2010-07-13 11:57:11 -03004 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Gustavo F. Padovan5d8868f2010-07-16 16:18:39 -03005 Copyright (C) 2010 Google Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090017 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090022 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 SOFTWARE IS DISCLAIMED.
25*/
26
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020027/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30
31#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080032#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/list.h>
Marcel Holtmannbe9d1222005-11-08 09:57:38 -080044#include <linux/device.h>
Marcel Holtmannaef7d972010-03-21 05:27:45 +010045#include <linux/debugfs.h>
46#include <linux/seq_file.h>
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -030047#include <linux/uaccess.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030048#include <linux/crc16.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#include <linux/math64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <net/sock.h>
51
52#include <asm/system.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
Anderson Brigliaea370122011-06-07 18:46:31 -030058#include <net/bluetooth/smp.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059#include <net/bluetooth/amp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020061int disable_ertm;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062int enable_reconfig;
Marcel Holtmannf0709e02007-10-20 13:38:51 +020063
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -070064static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_A2MP, };
Linus Torvalds1da177e2005-04-16 15:20:36 -070066
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067struct workqueue_struct *_l2cap_wq;
68
69struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71};
72
73static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
74 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
75static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
76 struct l2cap_pinfo *pi, u16 icid, u16 result);
77static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
78 u16 icid, u16 result);
79
80static void l2cap_amp_move_setup(struct sock *sk);
81static void l2cap_amp_move_success(struct sock *sk);
82static void l2cap_amp_move_revert(struct sock *sk);
83
84static int l2cap_ertm_rx_queued_iframes(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085
Linus Torvalds1da177e2005-04-16 15:20:36 -070086static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
87 u8 code, u8 ident, u16 dlen, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070088static int l2cap_answer_move_poll(struct sock *sk);
89static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
90static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
91static void l2cap_chan_ready(struct sock *sk);
92static void l2cap_conn_del(struct hci_conn *hcon, int err);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -030093
Marcel Holtmann01394182006-07-03 10:02:46 +020094/* ---- L2CAP channels ---- */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
Gustavo F. Padovan4a6aa522011-05-17 14:34:52 -030096{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 struct sock *s;
98 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
99 if (l2cap_pi(s)->dcid == cid)
100 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200101 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700102 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200103}
104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700105/* Find channel with given DCID.
106 * Returns locked socket */
107static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
108 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200109{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700110 struct sock *s;
111 read_lock(&l->lock);
112 s = __l2cap_get_chan_by_dcid(l, cid);
113 if (s)
114 bh_lock_sock(s);
115 read_unlock(&l->lock);
116 return s;
117}
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120{
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->scid == cid)
124 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200125 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200127}
128
129/* Find channel with given SCID.
130 * Returns locked socket */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700131static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200132{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700133 struct sock *s;
134 read_lock(&l->lock);
135 s = __l2cap_get_chan_by_scid(l, cid);
136 if (s)
137 bh_lock_sock(s);
138 read_unlock(&l->lock);
139 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200140}
141
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200143{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
147 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200148 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149 return s;
150}
151
152static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153{
154 struct sock *s;
155 read_lock(&l->lock);
156 s = __l2cap_get_chan_by_ident(l, ident);
157 if (s)
158 bh_lock_sock(s);
159 read_unlock(&l->lock);
160 return s;
161}
162
/* Return the queued skb whose ERTM control field carries tx sequence
 * number 'seq', or NULL if no frame in 'head' matches. */
static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
						u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
Marcel Holtmann01394182006-07-03 10:02:46 +0200177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 u16 allocSize = 1;
179 int err = 0;
180 int i;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300181
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182 /* Actual allocated size must be a power of 2 */
183 while (allocSize && allocSize <= size)
184 allocSize <<= 1;
185 if (!allocSize)
186 return -ENOMEM;
Marcel Holtmann01394182006-07-03 10:02:46 +0200187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188 seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
189 if (!seq_list->list)
190 return -ENOMEM;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700192 seq_list->size = allocSize;
193 seq_list->mask = allocSize - 1;
194 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
195 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
196 for (i = 0; i < allocSize; i++)
197 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300198
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300199 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300200}
201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300203{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700204 kfree(seq_list->list);
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300205}
206
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
208 u16 seq)
209{
210 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
211}
212
/* Unlink 'seq' from the sequence list and return it; returns
 * L2CAP_SEQ_LIST_CLEAR if the list is empty or 'seq' is not present.
 *
 * The list is threaded through the array itself: list[n & mask] holds
 * the sequence number that follows n, with L2CAP_SEQ_LIST_TAIL marking
 * the final entry.  Removing the head is O(1); removing any other entry
 * requires walking from the head.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		BT_DBG("List empty");
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed quickly */
		BT_DBG("Remove head");
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			/* Removed the only element; list is now empty */
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Non-head item must be found first */
		u16 prev = seq_list->head;
		BT_DBG("Find and remove");
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL) {
				BT_DBG("seq %d not in list", (int) seq);
				return L2CAP_SEQ_LIST_CLEAR;
			}
		}

		/* Bridge over the removed entry and clear its slot */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
252
/* Remove and return the oldest sequence number in the list
 * (L2CAP_SEQ_LIST_CLEAR if the list is empty). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
257
258static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
259{
260 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
261 u16 i;
262 for (i = 0; i < seq_list->size; i++)
263 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
264
265 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
266 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
267 }
268}
269
/* Append 'seq' to the tail of the list.  A sequence number whose slot is
 * already occupied (non-CLEAR) is silently ignored, so duplicates cannot
 * corrupt the threading. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
		/* Empty list: new entry becomes the head; otherwise link
		 * the old tail to the new entry */
		if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
			seq_list->head = seq;
		else
			seq_list->list[seq_list->tail & mask] = seq;

		seq_list->tail = seq;
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
	}
}
286
287static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
288{
289 u16 packed;
290
291 packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
292 L2CAP_CTRL_REQSEQ;
293 packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
294 L2CAP_CTRL_FINAL;
295
296 if (control->frame_type == 's') {
297 packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
298 L2CAP_CTRL_POLL;
299 packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
300 L2CAP_CTRL_SUPERVISE;
301 packed |= L2CAP_CTRL_FRAME_TYPE;
302 } else {
303 packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
304 L2CAP_CTRL_SAR;
305 packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
306 L2CAP_CTRL_TXSEQ;
307 }
308
309 return packed;
310}
311
312static void __get_enhanced_control(u16 enhanced,
313 struct bt_l2cap_control *control)
314{
315 control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
316 L2CAP_CTRL_REQSEQ_SHIFT;
317 control->final = (enhanced & L2CAP_CTRL_FINAL) >>
318 L2CAP_CTRL_FINAL_SHIFT;
319
320 if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
321 control->frame_type = 's';
322 control->poll = (enhanced & L2CAP_CTRL_POLL) >>
323 L2CAP_CTRL_POLL_SHIFT;
324 control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
325 L2CAP_CTRL_SUPERVISE_SHIFT;
326
327 control->sar = 0;
328 control->txseq = 0;
329 } else {
330 control->frame_type = 'i';
331 control->sar = (enhanced & L2CAP_CTRL_SAR) >>
332 L2CAP_CTRL_SAR_SHIFT;
333 control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
334 L2CAP_CTRL_TXSEQ_SHIFT;
335
336 control->poll = 0;
337 control->super = 0;
338 }
339}
340
341static u32 __pack_extended_control(struct bt_l2cap_control *control)
342{
343 u32 packed;
344
345 packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
346 L2CAP_EXT_CTRL_REQSEQ;
347 packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
348 L2CAP_EXT_CTRL_FINAL;
349
350 if (control->frame_type == 's') {
351 packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
352 L2CAP_EXT_CTRL_POLL;
353 packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
354 L2CAP_EXT_CTRL_SUPERVISE;
355 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
356 } else {
357 packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
358 L2CAP_EXT_CTRL_SAR;
359 packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
360 L2CAP_EXT_CTRL_TXSEQ;
361 }
362
363 return packed;
364}
365
366static void __get_extended_control(u32 extended,
367 struct bt_l2cap_control *control)
368{
369 control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
370 L2CAP_EXT_CTRL_REQSEQ_SHIFT;
371 control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
372 L2CAP_EXT_CTRL_FINAL_SHIFT;
373
374 if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
375 control->frame_type = 's';
376 control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
377 L2CAP_EXT_CTRL_POLL_SHIFT;
378 control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
379 L2CAP_EXT_CTRL_SUPERVISE_SHIFT;
380
381 control->sar = 0;
382 control->txseq = 0;
383 } else {
384 control->frame_type = 'i';
385 control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
386 L2CAP_EXT_CTRL_SAR_SHIFT;
387 control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
388 L2CAP_EXT_CTRL_TXSEQ_SHIFT;
389
390 control->poll = 0;
391 control->super = 0;
392 }
393}
394
/* Cancel any pending ERTM acknowledgment timer work. */
static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->ack_work);
}
400
/* Schedule the ERTM ack timer with the default timeout, unless an ack
 * is already pending (an armed timer is never pushed back). */
static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
	if (!delayed_work_pending(&pi->ack_work)) {
		queue_delayed_work(_l2cap_wq, &pi->ack_work,
				msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
	}
}
409
/* Cancel any pending ERTM retransmission timer work. */
static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->retrans_work);
}
415
/* (Re)arm the retransmission timer, but only when no monitor timer is
 * running and a retransmission timeout has been configured; any previous
 * retransmission work is cancelled first so the delay restarts. */
static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
		__cancel_delayed_work(&pi->retrans_work);
		queue_delayed_work(_l2cap_wq, &pi->retrans_work,
				msecs_to_jiffies(pi->retrans_timeout));
	}
}
425
/* Cancel any pending ERTM monitor timer work. */
static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->monitor_work);
}
431
/* Start the monitor timer (if a timeout is configured).  The
 * retransmission timer is stopped first — the two timers are not
 * run concurrently (see l2cap_ertm_start_retrans_timer). */
static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	l2cap_ertm_stop_retrans_timer(pi);
	__cancel_delayed_work(&pi->monitor_work);
	if (pi->monitor_timeout) {
		queue_delayed_work(_l2cap_wq, &pi->monitor_work,
				msecs_to_jiffies(pi->monitor_timeout));
	}
}
442
443static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
Marcel Holtmann01394182006-07-03 10:02:46 +0200444{
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300445 u16 cid = L2CAP_CID_DYN_START;
Marcel Holtmann01394182006-07-03 10:02:46 +0200446
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300447 for (; cid < L2CAP_CID_DYN_END; cid++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700448 if (!__l2cap_get_chan_by_scid(l, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200449 return cid;
450 }
451
452 return 0;
453}
454
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700455static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300456{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700457 sock_hold(sk);
Gustavo F. Padovanf1b394a2011-06-03 00:19:47 -0300458
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700459 if (l->head)
460 l2cap_pi(l->head)->prev_c = sk;
461
462 l2cap_pi(sk)->next_c = l->head;
463 l2cap_pi(sk)->prev_c = NULL;
464 l->head = sk;
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300465}
466
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700467static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300468{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700469 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300470
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700471 write_lock_bh(&l->lock);
472 if (sk == l->head)
473 l->head = next;
474
475 if (next)
476 l2cap_pi(next)->prev_c = prev;
477 if (prev)
478 l2cap_pi(prev)->next_c = next;
479 write_unlock_bh(&l->lock);
480
481 __sock_put(sk);
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300482}
483
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700484static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
Gustavo F. Padovanf1b394a2011-06-03 00:19:47 -0300485{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700486 struct l2cap_chan_list *l = &conn->chan_list;
Gustavo F. Padovanf1b394a2011-06-03 00:19:47 -0300487
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -0300488 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700489 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
Marcel Holtmann01394182006-07-03 10:02:46 +0200490
Marcel Holtmann2950f212009-02-12 14:02:50 +0100491 conn->disc_reason = 0x13;
492
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700493 l2cap_pi(sk)->conn = conn;
Marcel Holtmann01394182006-07-03 10:02:46 +0200494
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700495 if (!l2cap_pi(sk)->fixed_channel &&
496 (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
Ville Tervob62f3282011-02-10 22:38:50 -0300497 if (conn->hcon->type == LE_LINK) {
498 /* LE connection */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700499 if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
500 l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
501 if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
502 l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;
503
504 l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
505 l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
Ville Tervob62f3282011-02-10 22:38:50 -0300506 } else {
507 /* Alloc CID for connection-oriented socket */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700508 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
509 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
Ville Tervob62f3282011-02-10 22:38:50 -0300510 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700511 } else if (sk->sk_type == SOCK_DGRAM) {
Marcel Holtmann01394182006-07-03 10:02:46 +0200512 /* Connectionless socket */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700513 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
514 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
515 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
516 } else if (sk->sk_type == SOCK_RAW) {
Marcel Holtmann01394182006-07-03 10:02:46 +0200517 /* Raw socket can send/recv signalling messages only */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700518 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
519 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
520 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann01394182006-07-03 10:02:46 +0200521 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700522 /* Otherwise, do not set scid/dcid/omtu. These will be set up
523 * by l2cap_fixed_channel_config()
524 */
Marcel Holtmann01394182006-07-03 10:02:46 +0200525
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700526 __l2cap_chan_link(l, sk);
Marcel Holtmann01394182006-07-03 10:02:46 +0200527}
528
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900529/* Delete channel.
Marcel Holtmann01394182006-07-03 10:02:46 +0200530 * Must be called on the locked socket. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700531void l2cap_chan_del(struct sock *sk, int err)
Marcel Holtmann01394182006-07-03 10:02:46 +0200532{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700533 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann01394182006-07-03 10:02:46 +0200534 struct sock *parent = bt_sk(sk)->parent;
535
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700536 l2cap_sock_clear_timer(sk);
Marcel Holtmann01394182006-07-03 10:02:46 +0200537
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700538 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
Marcel Holtmann01394182006-07-03 10:02:46 +0200539
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +0900540 if (conn) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700541 /* Unlink from channel list */
542 l2cap_chan_unlink(&conn->chan_list, sk);
543 l2cap_pi(sk)->conn = NULL;
544 if (!l2cap_pi(sk)->fixed_channel)
545 hci_conn_put(conn->hcon);
Marcel Holtmann01394182006-07-03 10:02:46 +0200546 }
547
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700548 if (l2cap_pi(sk)->ampcon) {
549 l2cap_pi(sk)->ampcon->l2cap_data = NULL;
550 l2cap_pi(sk)->ampcon = NULL;
551 if (l2cap_pi(sk)->ampchan) {
Peter Krystadd6a9ceb2011-12-01 15:44:54 -0800552 if (!hci_chan_put(l2cap_pi(sk)->ampchan))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700553 l2cap_deaggregate(l2cap_pi(sk)->ampchan,
554 l2cap_pi(sk));
555 }
556 l2cap_pi(sk)->ampchan = NULL;
557 l2cap_pi(sk)->amp_id = 0;
558 }
559
560 sk->sk_state = BT_CLOSED;
Marcel Holtmann01394182006-07-03 10:02:46 +0200561 sock_set_flag(sk, SOCK_ZAPPED);
562
563 if (err)
564 sk->sk_err = err;
565
566 if (parent) {
567 bt_accept_unlink(sk);
568 parent->sk_data_ready(parent, 0);
569 } else
570 sk->sk_state_change(sk);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300571
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700572 skb_queue_purge(TX_QUEUE(sk));
Gustavo F. Padovan2ead70b2011-04-01 15:13:36 -0300573
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700574 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
575 if (l2cap_pi(sk)->sdu)
576 kfree_skb(l2cap_pi(sk)->sdu);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300577
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700578 skb_queue_purge(SREJ_QUEUE(sk));
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300579
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700580 __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
581 __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
582 __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300583 }
Marcel Holtmann01394182006-07-03 10:02:46 +0200584}
585
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700586static inline u8 l2cap_get_auth_type(struct sock *sk)
Gustavo F. Padovan6e9e43f2011-04-28 17:55:53 -0300587{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700588 if (sk->sk_type == SOCK_RAW) {
589 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530590 case BT_SECURITY_HIGH:
591 return HCI_AT_DEDICATED_BONDING_MITM;
592 case BT_SECURITY_MEDIUM:
593 return HCI_AT_DEDICATED_BONDING;
594 default:
595 return HCI_AT_NO_BONDING;
596 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700597 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
598 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
599 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
Johan Hedberg8556edd32011-01-19 12:06:50 +0530600
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700601 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
Johan Hedberg8556edd32011-01-19 12:06:50 +0530602 return HCI_AT_NO_BONDING_MITM;
603 else
604 return HCI_AT_NO_BONDING;
605 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700606 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530607 case BT_SECURITY_HIGH:
608 return HCI_AT_GENERAL_BONDING_MITM;
609 case BT_SECURITY_MEDIUM:
610 return HCI_AT_GENERAL_BONDING;
611 default:
612 return HCI_AT_NO_BONDING;
613 }
614 }
615}
616
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200617/* Service level security */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700618static inline int l2cap_check_security(struct sock *sk)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200619{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700620 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100621 __u8 auth_type;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200622
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700623 auth_type = l2cap_get_auth_type(sk);
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100624
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700625 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
626 auth_type);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200627}
628
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700629u8 l2cap_get_ident(struct l2cap_conn *conn)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200630{
631 u8 id;
632
633 /* Get next available identificator.
634 * 1 - 128 are used by kernel.
635 * 129 - 199 are reserved.
636 * 200 - 254 are used by utilities like l2ping, etc.
637 */
638
639 spin_lock_bh(&conn->lock);
640
641 if (++conn->tx_ident > 128)
642 conn->tx_ident = 1;
643
644 id = conn->tx_ident;
645
646 spin_unlock_bh(&conn->lock);
647
648 return id;
649}
650
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700651static void apply_fcs(struct sk_buff *skb)
652{
653 size_t len;
654 u16 partial_crc;
655 struct sk_buff *iter;
656 struct sk_buff *final_frag = skb;
657
658 if (skb_has_frag_list(skb))
659 len = skb_headlen(skb);
660 else
661 len = skb->len - L2CAP_FCS_SIZE;
662
663 partial_crc = crc16(0, (u8 *) skb->data, len);
664
665 skb_walk_frags(skb, iter) {
666 len = iter->len;
667 if (!iter->next)
668 len -= L2CAP_FCS_SIZE;
669
670 partial_crc = crc16(partial_crc, iter->data, len);
671 final_frag = iter;
672 }
673
674 put_unaligned_le16(partial_crc,
675 final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
676}
677
/* Build an L2CAP signalling command skb and transmit it on the ACL link.
 * Silently returns if the command skb cannot be built. */
void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	/* Use a non-flushable packet boundary when the controller
	 * supports it */
	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* NOTE(review): presumably forces the link active (out of sniff)
	 * for signalling traffic — confirm force_active semantics. */
	bt_cb(skb)->force_active = 1;

	hci_send_acl(conn->hcon, NULL, skb, flags);
}
697
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700698static inline int __l2cap_no_conn_pending(struct sock *sk)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300699{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700700 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300701}
702
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700703static void l2cap_send_conn_req(struct sock *sk)
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300704{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700705 struct l2cap_conn_req req;
706 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
707 req.psm = l2cap_pi(sk)->psm;
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300708
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700709 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
Gustavo F. Padovan2ab25cd2009-10-03 02:34:40 -0300710
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700711 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
712 L2CAP_CONN_REQ, sizeof(req), &req);
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300713}
714
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700715static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300716{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700717 struct l2cap_create_chan_req req;
718 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
719 req.psm = l2cap_pi(sk)->psm;
720 req.amp_id = amp_id;
721
722 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
723 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
724
725 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
726 L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300727}
728
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700729static void l2cap_do_start(struct sock *sk)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200730{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700731 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200732
733 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
Marcel Holtmann984947d2009-02-06 23:35:19 +0100734 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
735 return;
736
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700737 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
738 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200739
Peter Krystadc446d212011-09-20 15:35:50 -0700740 if (l2cap_pi(sk)->amp_pref ==
741 BT_AMP_POLICY_PREFER_AMP &&
742 conn->fc_mask & L2CAP_FC_A2MP)
743 amp_create_physical(conn, sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700744 else
745 l2cap_send_conn_req(sk);
Marcel Holtmannb1235d72008-07-14 20:13:54 +0200746 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200747 } else {
748 struct l2cap_info_req req;
749 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
750
751 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
752 conn->info_ident = l2cap_get_ident(conn);
753
754 mod_timer(&conn->info_timer, jiffies +
755 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
756
757 l2cap_send_cmd(conn, conn->info_ident,
758 L2CAP_INFO_REQ, sizeof(req), &req);
759 }
760}
761
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300762static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
763{
764 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -0300765 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300766 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
767
768 switch (mode) {
769 case L2CAP_MODE_ERTM:
770 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
771 case L2CAP_MODE_STREAMING:
772 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
773 default:
774 return 0x00;
775 }
776}
777
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700778void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -0300779{
780 struct l2cap_disconn_req req;
781
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300782 if (!conn)
783 return;
784
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700785 skb_queue_purge(TX_QUEUE(sk));
Gustavo F. Padovane92c8e72011-04-01 00:53:45 -0300786
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700787 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
788 skb_queue_purge(SREJ_QUEUE(sk));
789
790 __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
791 __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
792 __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300793 }
794
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700795 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
796 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -0300797 l2cap_send_cmd(conn, l2cap_get_ident(conn),
798 L2CAP_DISCONN_REQ, sizeof(req), &req);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300799
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700800 sk->sk_state = BT_DISCONN;
Gustavo F. Padovan9b108fc2010-05-20 16:21:53 -0300801 sk->sk_err = err;
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -0300802}
803
Linus Torvalds1da177e2005-04-16 15:20:36 -0700804/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200805static void l2cap_conn_start(struct l2cap_conn *conn)
806{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700807 struct l2cap_chan_list *l = &conn->chan_list;
808 struct sock_del_list del, *tmp1, *tmp2;
809 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200810
811 BT_DBG("conn %p", conn);
812
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700813 INIT_LIST_HEAD(&del.list);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200814
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700815 read_lock(&l->lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300816
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700817 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200818 bh_lock_sock(sk);
819
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700820 if (sk->sk_type != SOCK_SEQPACKET &&
821 sk->sk_type != SOCK_STREAM) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200822 bh_unlock_sock(sk);
823 continue;
824 }
825
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700826 if (sk->sk_state == BT_CONNECT) {
827 if (!l2cap_check_security(sk) ||
828 !__l2cap_no_conn_pending(sk)) {
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300829 bh_unlock_sock(sk);
830 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +0200831 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300832
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700833 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
834 conn->feat_mask)
835 && l2cap_pi(sk)->conf_state &
836 L2CAP_CONF_STATE2_DEVICE) {
837 tmp1 = kzalloc(sizeof(struct sock_del_list),
838 GFP_ATOMIC);
839 tmp1->sk = sk;
840 list_add_tail(&tmp1->list, &del.list);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300841 bh_unlock_sock(sk);
842 continue;
843 }
844
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700845 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300846
Peter Krystadc446d212011-09-20 15:35:50 -0700847 if (l2cap_pi(sk)->amp_pref ==
848 BT_AMP_POLICY_PREFER_AMP &&
849 conn->fc_mask & L2CAP_FC_A2MP)
850 amp_create_physical(conn, sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851 else
852 l2cap_send_conn_req(sk);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300853
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854 } else if (sk->sk_state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200855 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300856 char buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700857 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
858 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200859
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700860 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100861 if (bt_sk(sk)->defer_setup) {
862 struct sock *parent = bt_sk(sk)->parent;
863 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
864 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Nick Pellyf86e4b02010-04-08 16:23:32 -0700865 if (parent)
866 parent->sk_data_ready(parent, 0);
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100867
868 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700869 sk->sk_state = BT_CONFIG;
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100870 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
871 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
872 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200873 } else {
874 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
875 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
876 }
877
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700878 if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
879 l2cap_pi(sk)->amp_id) {
880 amp_accept_physical(conn,
881 l2cap_pi(sk)->amp_id, sk);
882 bh_unlock_sock(sk);
883 continue;
884 }
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300885
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700886 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
887 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
888
889 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300890 rsp.result != L2CAP_CR_SUCCESS) {
891 bh_unlock_sock(sk);
892 continue;
893 }
894
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700895 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300896 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700897 l2cap_build_conf_req(sk, buf), buf);
898 l2cap_pi(sk)->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200899 }
900
901 bh_unlock_sock(sk);
902 }
903
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700904 read_unlock(&l->lock);
905
906 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
907 bh_lock_sock(tmp1->sk);
908 __l2cap_sock_close(tmp1->sk, ECONNRESET);
909 bh_unlock_sock(tmp1->sk);
910 list_del(&tmp1->list);
911 kfree(tmp1);
912 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200913}
914
Inga Stotlandf214b6e2011-10-11 08:56:15 -0700915/* Find socket with fixed cid with given source and destination bdaddrs.
916 * Returns closest match, locked.
917 */
918static struct sock *l2cap_get_sock_by_fixed_scid(int state,
919 __le16 cid, bdaddr_t *src, bdaddr_t *dst)
920{
921 struct sock *sk = NULL, *sk1 = NULL;
922 struct hlist_node *node;
923
924 read_lock(&l2cap_sk_list.lock);
925
926 sk_for_each(sk, node, &l2cap_sk_list.head) {
927 if (state && sk->sk_state != state)
928 continue;
929
930 if (l2cap_pi(sk)->scid == cid && !bacmp(&bt_sk(sk)->dst, dst)) {
931 /* Exact match. */
932 if (!bacmp(&bt_sk(sk)->src, src))
933 break;
934
935 /* Closest match */
936 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
937 sk1 = sk;
938 }
939 }
940
941 read_unlock(&l2cap_sk_list.lock);
942
943 return node ? sk : sk1;
944}
945
Ville Tervob62f3282011-02-10 22:38:50 -0300946/* Find socket with cid and source bdaddr.
947 * Returns closest match, locked.
948 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700949static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
Ville Tervob62f3282011-02-10 22:38:50 -0300950{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700951 struct sock *sk = NULL, *sk1 = NULL;
952 struct hlist_node *node;
Ville Tervob62f3282011-02-10 22:38:50 -0300953
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700954 read_lock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300955
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700956 sk_for_each(sk, node, &l2cap_sk_list.head) {
957 if (state && sk->sk_state != state)
Ville Tervob62f3282011-02-10 22:38:50 -0300958 continue;
959
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700960 if (l2cap_pi(sk)->scid == cid) {
Ville Tervob62f3282011-02-10 22:38:50 -0300961 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700962 if (!bacmp(&bt_sk(sk)->src, src))
963 break;
Ville Tervob62f3282011-02-10 22:38:50 -0300964
965 /* Closest match */
966 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700967 sk1 = sk;
Ville Tervob62f3282011-02-10 22:38:50 -0300968 }
969 }
Gustavo F. Padovan280f2942011-04-13 19:01:22 -0300970
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971 read_unlock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300972
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700973 return node ? sk : sk1;
Ville Tervob62f3282011-02-10 22:38:50 -0300974}
975
/* Handle a newly established incoming LE link: if a socket is listening
 * on the LE data CID for the local address, create a child socket,
 * attach it to @conn and wake the listener. Uses bh socket locking and
 * GFP_ATOMIC allocation, so presumably runs in softirq context —
 * NOTE(review): confirm against callers.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	/* Keep the link alive while the child channel exists. */
	hci_conn_hold(conn->hcon);

	/* Child inherits settings from the listening socket. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

	/* The success path falls through here on purpose: only the
	 * parent unlock remains to be done.
	 */
clean:
	bh_unlock_sock(parent);
}
1023
/* Called once the underlying link is up: progress every channel on the
 * connection. LE channels go through SMP security first; raw sockets
 * are marked connected immediately; connection-oriented channels in
 * BT_CONNECT are started via l2cap_do_start().
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: offer it to a listening LE socket first. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	if (l->head) {
		for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
			bh_lock_sock(sk);

			if (conn->hcon->type == LE_LINK) {
				/* Use the stronger of the channel's own
				 * security level and any level already
				 * pending on the link.
				 */
				u8 sec_level = l2cap_pi(sk)->sec_level;
				u8 pending_sec = conn->hcon->pending_sec_level;

				if (pending_sec > sec_level)
					sec_level = pending_sec;

				/* Security already satisfied: channel is
				 * ready now. The hci_conn_put presumably
				 * drops a reference taken while security
				 * was pending — TODO confirm against the
				 * smp_conn_security() implementation.
				 */
				if (smp_conn_security(conn, sec_level)) {
					l2cap_chan_ready(sk);
					hci_conn_put(conn->hcon);
				}

			} else if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Raw/dgram sockets need no L2CAP channel
				 * setup: connected as soon as link is up.
				 */
				l2cap_sock_clear_timer(sk);
				sk->sk_state = BT_CONNECTED;
				sk->sk_state_change(sk);
			} else if (sk->sk_state == BT_CONNECT)
				l2cap_do_start(sk);

			bh_unlock_sock(sk);
		}
	} else if (conn->hcon->type == LE_LINK) {
		/* No channels yet on this LE link: still kick off
		 * security negotiation.
		 */
		smp_conn_security(conn, BT_SECURITY_HIGH);
	}

	read_unlock(&l->lock);
}
1068
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001069/* Notify sockets that we cannot guaranty reliability anymore */
1070static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1071{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001072 struct l2cap_chan_list *l = &conn->chan_list;
1073 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001074
1075 BT_DBG("conn %p", conn);
1076
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001077 read_lock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001078
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001079 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1080 if (l2cap_pi(sk)->force_reliable)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001081 sk->sk_err = err;
1082 }
1083
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001084 read_unlock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001085}
1086
1087static void l2cap_info_timeout(unsigned long arg)
1088{
1089 struct l2cap_conn *conn = (void *) arg;
1090
Marcel Holtmann984947d2009-02-06 23:35:19 +01001091 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001092 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001093
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001094 l2cap_conn_start(conn);
1095}
1096
/* Allocate and initialize the L2CAP connection object for @hcon.
 * Returns the existing conn if one is already attached, NULL on
 * allocation failure, and NULL (the existing conn pointer, which is
 * NULL here) when @status is non-zero.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Already set up, or the link came up with an error status. */
	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links use the controller's LE MTU when it advertises one. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* LE links get an SMP timeout timer on the hci_conn; BR/EDR
	 * links get the information-request timer on the conn.
	 */
	if (hcon->type == LE_LINK)
		setup_timer(&hcon->smp_timer, smp_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason (0x13) reported until overridden. */
	conn->disc_reason = 0x13;

	return conn;
}
1137
/* Tear down L2CAP state when an HCI link goes away. @hcon may be the
 * primary link or an AMP link: channels riding on it are deleted and
 * killed, and the conn object itself is freed only when the primary
 * link is the one going down.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	struct sock *next;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame on the primary link. */
	if ((conn->hcon == hcon) && (conn->rx_skb))
		kfree_skb(conn->rx_skb);

	BT_DBG("conn->hcon %p", conn->hcon);

	/* Kill channels */
	for (sk = conn->chan_list.head; sk; ) {
		BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
		/* A channel dies if it rides on @hcon either as its
		 * primary link or as its AMP link. The successor is
		 * cached before l2cap_chan_del() unlinks the node.
		 */
		if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
			next = l2cap_pi(sk)->next_c;
			bh_lock_sock(sk);
			l2cap_chan_del(sk, err);
			bh_unlock_sock(sk);
			l2cap_sock_kill(sk);
			sk = next;
		} else
			sk = l2cap_pi(sk)->next_c;
	}

	/* Only the primary link's demise frees the conn object. */
	if (conn->hcon == hcon) {
		if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
			del_timer_sync(&conn->info_timer);

		hcon->l2cap_data = NULL;

		kfree(conn);
	}
}
1177
1178static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
1179{
1180 struct l2cap_chan_list *l = &conn->chan_list;
1181 write_lock_bh(&l->lock);
1182 __l2cap_chan_add(conn, sk);
1183 write_unlock_bh(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001184}
1185
Linus Torvalds1da177e2005-04-16 15:20:36 -07001186/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187
1188/* Find socket with psm and source bdaddr.
1189 * Returns closest match.
1190 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001191static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001192{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001193 struct sock *sk = NULL, *sk1 = NULL;
1194 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001195
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001196 read_lock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001197
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001198 sk_for_each(sk, node, &l2cap_sk_list.head) {
1199 if (state && sk->sk_state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001200 continue;
1201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001202 if (l2cap_pi(sk)->psm == psm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001203 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001204 if (!bacmp(&bt_sk(sk)->src, src))
1205 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001206
1207 /* Closest match */
1208 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001209 sk1 = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001210 }
1211 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001212
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001213 read_unlock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001214
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001215 return node ? sk : sk1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001216}
1217
/* Establish (or attach to) an HCI link for @sk and begin L2CAP channel
 * setup. Fixed channels attach to an existing ACL link only; other
 * channels create an LE or ACL connection as appropriate for the
 * destination CID. Returns 0 on success or a negative errno.
 */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	/* Pick the local adapter that routes to the destination. */
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	if (l2cap_pi(sk)->fixed_channel) {
		/* Fixed channels piggyback on existing ACL connections */
		hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
		if (!hcon || !hcon->l2cap_data) {
			err = -ENOTCONN;
			goto done;
		}

		conn = hcon->l2cap_data;
	} else {
		/* LE data CID selects an LE link, everything else ACL. */
		if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
			hcon = hci_connect(hdev, LE_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);
		else
			hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);

		if (IS_ERR(hcon)) {
			err = PTR_ERR(hcon);
			goto done;
		}

		conn = l2cap_conn_add(hcon, 0);
		if (!conn) {
			hci_conn_put(hcon);
			err = -ENOMEM;
			goto done;
		}
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk);

	/* Fixed channels, and LE channels whose link is already up, need
	 * no signaling exchange and are connected immediately.
	 */
	if ((l2cap_pi(sk)->fixed_channel) ||
		(l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
			hcon->state == BT_CONNECTED)) {
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		sk->sk_state = BT_CONNECT;
		l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
		sk->sk_state_change(sk);

		/* Link already up: skip straight to channel setup. */
		if (hcon->state == BT_CONNECTED) {
			if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Raw/dgram sockets: connected once the
				 * security requirement is met.
				 */
				l2cap_sock_clear_timer(sk);
				if (l2cap_check_security(sk)) {
					sk->sk_state = BT_CONNECTED;
					sk->sk_state_change(sk);
				}
			} else
				l2cap_do_start(sk);
		}
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1304
/* Block (interruptibly) until all outstanding ERTM frames on @sk have
 * been acknowledged and its TX queue drained, or until a signal or
 * socket error interrupts the wait. Called with the socket lock held;
 * the lock is released around each sleep. Returns 0 or a negative
 * errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
		atomic_read(&l2cap_pi(sk)->ertm_queued)) {
		/* Must be set before the conditions are re-checked so a
		 * wakeup between check and schedule is not lost.
		 */
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval after a full expiry. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the RX path can
		 * make progress and ack our frames.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1336
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001337static void l2cap_ertm_tx_worker(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001338{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001339 struct l2cap_pinfo *pi =
1340 container_of(work, struct l2cap_pinfo, tx_work);
1341 struct sock *sk = (struct sock *)pi;
1342 BT_DBG("%p", pi);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001343
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001344 lock_sock(sk);
1345 l2cap_ertm_send(sk);
1346 release_sock(sk);
Mat Martineau2f0cd842011-10-20 14:34:26 -07001347 sock_put(sk);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001348}
1349
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001350static void l2cap_skb_destructor(struct sk_buff *skb)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001351{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001352 struct sock *sk = skb->sk;
1353 int queued;
Mat Martineau2f0cd842011-10-20 14:34:26 -07001354 int keep_sk = 0;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001355
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001356 queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
1357 if (queued < L2CAP_MIN_ERTM_QUEUED)
Mat Martineau2f0cd842011-10-20 14:34:26 -07001358 keep_sk = queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);
1359
1360 if (!keep_sk)
1361 sock_put(sk);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001362}
1363
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001364void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001365{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001366 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001367
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001368 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001369
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001370 if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
1371 pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
1372 BT_DBG("Sending on AMP connection %p %p",
1373 pi->ampcon, pi->ampchan);
1374 if (pi->ampchan)
1375 hci_send_acl(pi->ampcon, pi->ampchan, skb,
1376 ACL_COMPLETE);
1377 else
1378 kfree_skb(skb);
1379 } else {
1380 u16 flags;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001381
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001382 bt_cb(skb)->force_active = pi->force_active;
1383 BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001384
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001385 if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
1386 !l2cap_pi(sk)->flushable)
1387 flags = ACL_START_NO_FLUSH;
1388 else
1389 flags = ACL_START;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001390
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001391 hci_send_acl(pi->conn->hcon, NULL, skb, flags);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001392 }
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001393}
1394
/* Transmit as many queued ERTM I-frames as the remote TX window, the
 * local in-flight limit and the TX state machine allow. Returns the
 * number of frames sent, 0 when sending is not currently permitted, or
 * -ENOTCONN if the channel is not connected.
 */
int l2cap_ertm_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p", sk);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer signaled receiver-busy: hold off transmission. */
	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return 0;

	/* No sending in the middle of an AMP channel move. */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
		atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
		(pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {

		skb = sk->sk_send_head;

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback a pending F-bit on this frame. */
		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
			control->final = 1;
			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
		}
		/* Acknowledge everything received so far in this frame. */
		control->reqseq = pi->buffer_seq;
		pi->last_acked_seq = pi->buffer_seq;
		control->txseq = pi->next_tx_seq;

		/* Write the control field into the frame header in the
		 * format negotiated for this channel.
		 */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		/* The clone holds a socket ref; l2cap_skb_destructor
		 * releases it and maintains the ertm_queued count.
		 */
		sock_hold(sk);
		tx_skb->sk = sk;
		tx_skb->destructor = l2cap_skb_destructor;
		atomic_inc(&pi->ertm_queued);

		l2cap_do_send(sk, tx_skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		l2cap_ertm_start_retrans_timer(pi);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->unacked_frames += 1;
		pi->frames_sent += 1;
		sent += 1;

		/* Advance sk_send_head; the original skb stays queued for
		 * possible retransmission.
		 */
		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
		(int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
		atomic_read(&pi->ertm_queued));

	return sent;
}
1475
/* Streaming-mode transmit: append @skbs to the TX queue and send every
 * queued frame immediately (streaming mode has no retransmission, so
 * frames are dequeued for good). Returns 0, or -ENOTCONN if the
 * channel is not connected.
 */
int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p, skbs %p", sk, skbs);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* No sending in the middle of an AMP channel move. Note: the
	 * frames are dropped silently in this case (return 0 with @skbs
	 * left unsent) — NOTE(review): confirm callers expect this.
	 */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));

	BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
	while (!skb_queue_empty(TX_QUEUE(sk))) {

		skb = skb_dequeue(TX_QUEUE(sk));

		BT_DBG("skb %p", skb);

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		BT_DBG("control %p", control);

		/* Streaming mode never acknowledges: reqseq is always 0. */
		control->reqseq = 0;
		control->txseq = pi->next_tx_seq;

		/* Write the control field into the frame header in the
		 * format negotiated for this channel.
		 */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		l2cap_do_send(sk, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->frames_sent += 1;
		sent += 1;
	}

	BT_DBG("Sent %d", sent);

	return 0;
}
1533
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001534static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001535{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001536 while (len > 0) {
1537 if (iv->iov_len) {
1538 int copy = min_t(unsigned int, len, iv->iov_len);
1539 memcpy(kdata, iv->iov_base, copy);
1540 len -= copy;
1541 kdata += copy;
1542 iv->iov_base += copy;
1543 iv->iov_len -= copy;
1544 }
1545 iv++;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001546 }
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001547
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001548 return 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001549}
1550
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001551static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
1552 int len, int count, struct sk_buff *skb,
1553 int reseg)
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03001554{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001555 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001556 struct sk_buff **frag;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001557 struct sk_buff *final;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001558 int err, sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001559
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001560 BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
1561 msg, (int)len, (int)count, skb);
1562
1563 if (!conn)
1564 return -ENOTCONN;
1565
1566 /* When resegmenting, data is copied from kernel space */
1567 if (reseg) {
1568 err = memcpy_fromkvec(skb_put(skb, count),
1569 (struct kvec *) msg->msg_iov, count);
1570 } else {
1571 err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
1572 count);
1573 }
1574
1575 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001576 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577
1578 sent += count;
1579 len -= count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001580 final = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001581
1582 /* Continuation fragments (no L2CAP header) */
1583 frag = &skb_shinfo(skb)->frag_list;
1584 while (len) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001585 int skblen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001586 count = min_t(unsigned int, conn->mtu, len);
1587
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001588 /* Add room for the FCS if it fits */
1589 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
1590 len + L2CAP_FCS_SIZE <= conn->mtu)
1591 skblen = count + L2CAP_FCS_SIZE;
1592 else
1593 skblen = count;
1594
1595 /* Don't use bt_skb_send_alloc() while resegmenting, since
1596 * it is not ok to block.
1597 */
1598 if (reseg) {
1599 *frag = bt_skb_alloc(skblen, GFP_ATOMIC);
1600 if (*frag)
1601 skb_set_owner_w(*frag, sk);
1602 } else {
1603 *frag = bt_skb_send_alloc(sk, skblen,
1604 msg->msg_flags & MSG_DONTWAIT, &err);
1605 }
1606
Linus Torvalds1da177e2005-04-16 15:20:36 -07001607 if (!*frag)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001608 return -EFAULT;
1609
1610 /* When resegmenting, data is copied from kernel space */
1611 if (reseg) {
1612 err = memcpy_fromkvec(skb_put(*frag, count),
1613 (struct kvec *) msg->msg_iov,
1614 count);
1615 } else {
1616 err = memcpy_fromiovec(skb_put(*frag, count),
1617 msg->msg_iov, count);
1618 }
1619
1620 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001621 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622
1623 sent += count;
1624 len -= count;
1625
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001626 final = *frag;
1627
Linus Torvalds1da177e2005-04-16 15:20:36 -07001628 frag = &(*frag)->next;
1629 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001630
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001631 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
1632 if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
1633 if (reseg) {
1634 *frag = bt_skb_alloc(L2CAP_FCS_SIZE,
1635 GFP_ATOMIC);
1636 if (*frag)
1637 skb_set_owner_w(*frag, sk);
1638 } else {
1639 *frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
1640 msg->msg_flags & MSG_DONTWAIT,
1641 &err);
1642 }
1643
1644 if (!*frag)
1645 return -EFAULT;
1646
1647 final = *frag;
1648 }
1649
1650 skb_put(final, L2CAP_FCS_SIZE);
1651 }
1652
Linus Torvalds1da177e2005-04-16 15:20:36 -07001653 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001654}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001655
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001656struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001657{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001658 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001659 struct sk_buff *skb;
1660 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1661 struct l2cap_hdr *lh;
1662
1663 BT_DBG("sk %p len %d", sk, (int)len);
1664
1665 count = min_t(unsigned int, (conn->mtu - hlen), len);
1666 skb = bt_skb_send_alloc(sk, count + hlen,
1667 msg->msg_flags & MSG_DONTWAIT, &err);
1668 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001669 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001670
1671 /* Create L2CAP header */
1672 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001673 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001674 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001675 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001676
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001677 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001678 if (unlikely(err < 0)) {
1679 kfree_skb(skb);
1680 return ERR_PTR(err);
1681 }
1682 return skb;
1683}
1684
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001685struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001686{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001687 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001688 struct sk_buff *skb;
1689 int err, count, hlen = L2CAP_HDR_SIZE;
1690 struct l2cap_hdr *lh;
1691
1692 BT_DBG("sk %p len %d", sk, (int)len);
1693
1694 count = min_t(unsigned int, (conn->mtu - hlen), len);
1695 skb = bt_skb_send_alloc(sk, count + hlen,
1696 msg->msg_flags & MSG_DONTWAIT, &err);
1697 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001698 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001699
1700 /* Create L2CAP header */
1701 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001702 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001703 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1704
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001705 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001706 if (unlikely(err < 0)) {
1707 kfree_skb(skb);
1708 return ERR_PTR(err);
1709 }
1710 return skb;
1711}
1712
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001713struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
1714 struct msghdr *msg, size_t len,
1715 u16 sdulen, int reseg)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001716{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001717 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001718 int err, count, hlen;
1719 int reserve = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001720 struct l2cap_hdr *lh;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001721 u8 fcs = l2cap_pi(sk)->fcs;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001722
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001723 if (l2cap_pi(sk)->extended_control)
1724 hlen = L2CAP_EXTENDED_HDR_SIZE;
1725 else
1726 hlen = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03001727
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001728 if (sdulen)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001729 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001730
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001731 if (fcs == L2CAP_FCS_CRC16)
1732 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001733
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001734 BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
1735 sk, msg, (int)len, (int)sdulen, hlen);
1736
1737 count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
1738
1739 /* Allocate extra headroom for Qualcomm PAL. This is only
1740 * necessary in two places (here and when creating sframes)
1741 * because only unfragmented iframes and sframes are sent
1742 * using AMP controllers.
1743 */
1744 if (l2cap_pi(sk)->ampcon &&
1745 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1746 reserve = BT_SKB_RESERVE_80211;
1747
1748 /* Don't use bt_skb_send_alloc() while resegmenting, since
1749 * it is not ok to block.
1750 */
1751 if (reseg) {
1752 skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
1753 if (skb)
1754 skb_set_owner_w(skb, sk);
1755 } else {
1756 skb = bt_skb_send_alloc(sk, count + hlen + reserve,
1757 msg->msg_flags & MSG_DONTWAIT, &err);
1758 }
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001759 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001760 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001761
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001762 if (reserve)
1763 skb_reserve(skb, reserve);
1764
1765 bt_cb(skb)->control.fcs = fcs;
1766
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001767 /* Create L2CAP header */
1768 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001769 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1770 lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001771
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001772 /* Control header is populated later */
1773 if (l2cap_pi(sk)->extended_control)
1774 put_unaligned_le32(0, skb_put(skb, 4));
1775 else
1776 put_unaligned_le16(0, skb_put(skb, 2));
1777
1778 if (sdulen)
1779 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1780
1781 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001782 if (unlikely(err < 0)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001783 BT_DBG("err %d", err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001784 kfree_skb(skb);
1785 return ERR_PTR(err);
1786 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001787
1788 bt_cb(skb)->retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001789 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790}
1791
/* Process an acknowledgement (reqseq) received from the peer.
 *
 * Removes and frees every transmitted frame with a sequence number
 * before @reqseq from the tx queue, advances expected_ack_seq, and
 * stops the retransmission timer once no frames remain unacked.
 */
static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *acked_skb;
	u16 ackseq;

	BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);

	pi = l2cap_pi(sk);

	/* Nothing to do if nothing is outstanding or this ack is a repeat. */
	if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
		return;

	BT_DBG("expected_ack_seq %d, unacked_frames %d",
		(int) pi->expected_ack_seq, (int) pi->unacked_frames);

	/* Walk the newly-acked sequence range and free those frames. */
	for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
		ackseq = __next_seq(ackseq, pi)) {

		acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
		if (acked_skb) {
			skb_unlink(acked_skb, TX_QUEUE(sk));
			kfree_skb(acked_skb);
			pi->unacked_frames--;
		}
	}

	pi->expected_ack_seq = reqseq;

	if (pi->unacked_frames == 0)
		l2cap_ertm_stop_retrans_timer(pi);

	BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
}
1826
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001827static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001828{
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001829 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001830 int len;
1831 int reserve = 0;
1832 struct l2cap_hdr *lh;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001833
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001834 if (l2cap_pi(sk)->extended_control)
1835 len = L2CAP_EXTENDED_HDR_SIZE;
1836 else
1837 len = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001838
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001839 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1840 len += L2CAP_FCS_SIZE;
1841
1842 /* Allocate extra headroom for Qualcomm PAL */
1843 if (l2cap_pi(sk)->ampcon &&
1844 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1845 reserve = BT_SKB_RESERVE_80211;
1846
1847 skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);
1848
1849 if (!skb)
1850 return ERR_PTR(-ENOMEM);
1851
1852 if (reserve)
1853 skb_reserve(skb, reserve);
1854
1855 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1856 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1857 lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);
1858
1859 if (l2cap_pi(sk)->extended_control)
1860 put_unaligned_le32(control, skb_put(skb, 4));
1861 else
1862 put_unaligned_le16(control, skb_put(skb, 2));
1863
1864 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1865 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1866 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001867 }
1868
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001869 return skb;
1870}
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001871
/* Build and transmit a single S-frame (RR, RNR, REJ or SREJ).
 *
 * Applies a pending F-bit, tracks the sent-RNR state, and for
 * non-SREJ frames records the acked sequence number and stops the ack
 * timer.  The frame is silently dropped if an AMP move is in a state
 * where S-frames must not be sent.
 */
static void l2cap_ertm_send_sframe(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("sk %p, control %p", sk, control);

	/* Only S-frames are handled here. */
	if (control->frame_type != 's')
		return;

	pi = l2cap_pi(sk);

	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
		pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		BT_DBG("AMP error - attempted S-Frame send during AMP move");
		return;
	}

	/* Piggy-back a pending F-bit unless this frame carries a poll. */
	if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
		control->final = 1;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Track whether the peer currently believes we are busy. */
	if (control->super == L2CAP_SFRAME_RR)
		pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
	else if (control->super == L2CAP_SFRAME_RNR)
		pi->conn_state |= L2CAP_CONN_SENT_RNR;

	/* RR/RNR/REJ acknowledge up to reqseq; SREJ does not ack. */
	if (control->super != L2CAP_SFRAME_SREJ) {
		pi->last_acked_seq = control->reqseq;
		l2cap_ertm_stop_ack_timer(pi);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
		(int) control->final, (int) control->poll,
		(int) control->super);

	if (pi->extended_control)
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(sk, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(sk, skb);
}
1921
/* Acknowledge received I-frames.
 *
 * When locally busy, sends an RNR.  Otherwise first tries to
 * piggy-back the ack on pending I-frames; if frames still need
 * acknowledging, an RR is sent immediately once the backlog reaches
 * 3/4 of the tx window, or the ack timer is armed for a delayed ack.
 */
static void l2cap_ertm_send_ack(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control control;
	u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
	int threshold;

	BT_DBG("sk %p", sk);
	BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
		(int)pi->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';

	if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
		pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop sending. */
		l2cap_ertm_stop_ack_timer(pi);
		control.super = L2CAP_SFRAME_RNR;
		control.reqseq = pi->buffer_seq;
		l2cap_ertm_send_sframe(sk, &control);
	} else {
		if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
			l2cap_ertm_send(sk);
			/* If any i-frames were sent, they included an ack */
			if (pi->buffer_seq == pi->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = pi->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
			threshold);

		if (frames_to_ack >= threshold) {
			l2cap_ertm_stop_ack_timer(pi);
			control.super = L2CAP_SFRAME_RR;
			control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &control);
			frames_to_ack = 0;
		}

		/* Below the threshold: defer the ack via the ack timer. */
		if (frames_to_ack)
			l2cap_ertm_start_ack_timer(pi);
	}
}
1972
1973static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
1974{
1975 struct l2cap_pinfo *pi;
1976 struct bt_l2cap_control control;
1977
1978 BT_DBG("sk %p, poll %d", sk, (int) poll);
1979
1980 pi = l2cap_pi(sk);
1981
1982 memset(&control, 0, sizeof(control));
1983 control.frame_type = 's';
1984 control.poll = poll;
1985
1986 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
1987 control.super = L2CAP_SFRAME_RNR;
1988 else
1989 control.super = L2CAP_SFRAME_RR;
1990
1991 control.reqseq = pi->buffer_seq;
1992 l2cap_ertm_send_sframe(sk, &control);
1993}
1994
/* Respond to a received poll (P-bit) with an F-bit response.
 *
 * Sets SEND_FBIT so the F-bit rides on the first frame sent: an RNR
 * if locally busy, otherwise a pending I-frame if one exists, or
 * finally an explicit RR.  Also restarts the retransmission timer
 * when the remote-busy condition clears with frames still unacked.
 */
static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.final = 1;
	control.reqseq = pi->buffer_seq;
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		/* Busy: the F-bit goes out on an RNR right away. */
		control.super = L2CAP_SFRAME_RNR;
		l2cap_ertm_send_sframe(sk, &control);
	}

	if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
		(pi->unacked_frames > 0))
		l2cap_ertm_start_retrans_timer(pi);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Send pending iframes */
	l2cap_ertm_send(sk);

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SFRAME_RR;
		l2cap_ertm_send_sframe(sk, &control);
	}
}
2032
/* Send SREJ frames for every missing sequence number from
 * expected_tx_seq up to (but not including) the just-received @txseq,
 * skipping frames already held in the SREJ queue.  Each requested
 * number is recorded in srej_list, and expected_tx_seq is advanced
 * past @txseq.
 */
static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
{
	struct bt_l2cap_control control;
	struct l2cap_pinfo *pi;
	u16 seq;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);

	pi = l2cap_pi(sk);
	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.super = L2CAP_SFRAME_SREJ;

	for (seq = pi->expected_tx_seq; seq != txseq;
		seq = __next_seq(seq, pi)) {
		/* Only request frames not already received out of order. */
		if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
			control.reqseq = seq;
			l2cap_ertm_send_sframe(sk, &control);
			l2cap_seq_list_append(&pi->srej_list, seq);
		}
	}

	pi->expected_tx_seq = __next_seq(txseq, pi);
}
2057
2058static void l2cap_ertm_send_srej_tail(struct sock *sk)
2059{
2060 struct bt_l2cap_control control;
2061 struct l2cap_pinfo *pi;
2062
2063 BT_DBG("sk %p", sk);
2064
2065 pi = l2cap_pi(sk);
2066
2067 if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2068 return;
2069
2070 memset(&control, 0, sizeof(control));
2071 control.frame_type = 's';
2072 control.super = L2CAP_SFRAME_SREJ;
2073 control.reqseq = pi->srej_list.tail;
2074 l2cap_ertm_send_sframe(sk, &control);
2075}
2076
/* Re-send SREJ frames for all outstanding sequence numbers in the
 * SREJ list except @txseq (which has just been received).  A single
 * pass is made over the list; numbers that are re-requested are
 * appended back so the list contents are preserved.
 */
static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
{
	struct bt_l2cap_control control;
	struct l2cap_pinfo *pi;
	u16 initial_head;
	u16 seq;

	BT_DBG("sk %p, txseq %d", sk, (int) txseq);

	pi = l2cap_pi(sk);
	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.super = L2CAP_SFRAME_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = pi->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&pi->srej_list);
		/* Stop at the received frame or when the list is drained. */
		if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
			break;

		control.reqseq = seq;
		l2cap_ertm_send_sframe(sk, &control);
		l2cap_seq_list_append(&pi->srej_list, seq);
	} while (pi->srej_list.head != initial_head);
}
2104
2105static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
2106{
2107 struct l2cap_pinfo *pi = l2cap_pi(sk);
2108 BT_DBG("sk %p", sk);
2109
2110 pi->expected_tx_seq = pi->buffer_seq;
2111 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
2112 skb_queue_purge(SREJ_QUEUE(sk));
2113 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2114}
2115
/* ERTM transmit state machine: XMIT-state event handler.
 *
 * In XMIT, new data may be transmitted immediately.  Handles
 * local-busy enter/exit (including the AMP channel-move interactions
 * on busy-clear), reqseq processing, explicit polls, and the
 * retransmission timeout — the latter two poll the peer and move the
 * channel to the WAIT_F state.
 *
 * Returns 0 (no error paths are currently generated).
 */
static int l2cap_ertm_tx_state_xmit(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		/* Queue the new frames and transmit right away. */
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		l2cap_ertm_send(sk);
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* Sends an RNR because LOCAL_BUSY is now set. */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		/* An AMP move that was waiting for local-busy to clear can
		 * now be confirmed (initiator) or answered (responder).
		 */
		if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
			if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
						pi->amp_move_cmd_ident,
						pi->dcid,
						L2CAP_MOVE_CHAN_SUCCESS);
			}
			break;
		}

		/* If the peer was told we were busy, poll it (RR with P=1)
		 * and wait for the F-bit response.
		 */
		if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
			(pi->conn_state & L2CAP_CONN_SENT_RNR)) {
			struct bt_l2cap_control local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		/* Poll the peer and wait for its F-bit response. */
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		l2cap_ertm_stop_ack_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
		/* No ack arrived in time: poll the peer. */
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2214
/* ERTM transmit state machine: WAIT_F-state event handler.
 *
 * Entered after sending a poll (P=1).  New data is queued but not
 * transmitted until a frame with the F-bit set arrives, at which
 * point the channel returns to XMIT.  The monitor timer re-polls up
 * to max_tx times before the connection is disconnected.
 *
 * Returns 0 (no error paths are currently generated).
 */
static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* Sends an RNR because LOCAL_BUSY is now set. */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		/* Re-poll (RR with P=1) if the peer was told we were busy. */
		if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
			struct bt_l2cap_control local_control;
			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);

		/* Fall through */

	case L2CAP_ERTM_EVENT_RECV_FBIT:
		/* The awaited F-bit response: back to normal transmission. */
		if (control && control->final) {
			l2cap_ertm_stop_monitor_timer(pi);
			if (pi->unacked_frames > 0)
				l2cap_ertm_start_retrans_timer(pi);
			pi->retry_count = 0;
			pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
		}
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
		/* Re-poll until max_tx is exhausted (max_tx == 0 means
		 * retry forever), then give up on the connection.
		 */
		if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
			l2cap_ertm_send_rr_or_rnr(sk, 1);
			l2cap_ertm_start_monitor_timer(pi);
			pi->retry_count += 1;
		} else
			l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
		break;
	default:
		break;
	}

	return err;
}
2297
2298int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
2299 struct sk_buff_head *skbs, u8 event)
2300{
2301 struct l2cap_pinfo *pi;
2302 int err = 0;
2303
2304 BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
2305 sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
2306
2307 pi = l2cap_pi(sk);
2308
2309 switch (pi->tx_state) {
2310 case L2CAP_ERTM_TX_STATE_XMIT:
2311 err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
2312 break;
2313 case L2CAP_ERTM_TX_STATE_WAIT_F:
2314 err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
2315 break;
2316 default:
2317 /* Ignore event */
2318 break;
2319 }
2320
2321 return err;
2322}
2323
/* Segment an SDU from @msg into one or more I-frame PDUs appended to
 * @seg_queue.
 *
 * The PDU payload size is limited by the HCI MTU (each PDU must fit
 * in a single HCI fragment), by the BR/EDR radio packet limit when no
 * AMP controller is in use, by worst-case L2CAP overhead, and by the
 * remote MPS.  The first PDU of a segmented SDU carries the SDU
 * length with SAR "start"; subsequent PDUs are "continue" and the
 * last is "end".  @reseg selects non-blocking allocation for the
 * post-AMP-move resegmentation path.
 *
 * Returns 0 on success; on PDU-creation failure the partially-built
 * queue is purged and the error is returned.
 */
int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
			struct msghdr *msg, size_t len, int reseg)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = l2cap_pi(sk)->conn->mtu;

	/* Constrain BR/EDR PDU size to fit within the largest radio packet */
	if (!l2cap_pi(sk)->ampcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU-length field needed. */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU carries the total SDU length. */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len) {
		skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);

		BT_DBG("iframe skb %p", skb);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Later PDUs have no SDU-length field, so they can
			 * carry that much more payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2392
2393static inline int is_initial_frame(u8 sar)
2394{
2395 return (sar == L2CAP_SAR_UNSEGMENTED ||
2396 sar == L2CAP_SAR_START);
2397}
2398
2399static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
2400 size_t veclen)
2401{
2402 struct sk_buff *frag_iter;
2403
2404 BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
2405
2406 if (iv->iov_len + skb->len > veclen)
2407 return -ENOMEM;
2408
2409 memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
2410 iv->iov_len += skb->len;
2411
2412 skb_walk_frags(skb, frag_iter) {
2413 if (iv->iov_len + skb->len > veclen)
2414 return -ENOMEM;
2415
2416 BT_DBG("Copying %d bytes", (int)frag_iter->len);
2417 memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
2418 frag_iter->len);
2419 iv->iov_len += frag_iter->len;
2420 }
2421
2422 return 0;
2423}
2424
/* Rebuild the ERTM PDUs on @queue to match the channel's current
 * parameters (PDU size, extended vs. enhanced headers, FCS) after an
 * AMP channel move.  Each SDU is reassembled from its queued PDUs into
 * a flat bounce buffer, then re-segmented via l2cap_segment_sdu().
 *
 * Called with the socket locked.  Returns 0 on success or a negative
 * errno; on error the queue is purged and left empty.
 */
int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
{
	void *buf;
	int buflen;
	int err = 0;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iv;
	struct sk_buff_head old_frames;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (skb_queue_empty(queue))
		return 0;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = (struct iovec *) &iv;

	/* Bounce buffer large enough for one max-size SDU plus FCS. */
	buflen = pi->omtu + L2CAP_FCS_SIZE;
	buf = kzalloc(buflen, GFP_TEMPORARY);

	if (!buf) {
		BT_DBG("Could not allocate resegmentation buffer");
		return -ENOMEM;
	}

	/* Move current frames off the original queue */
	__skb_queue_head_init(&old_frames);
	skb_queue_splice_tail_init(queue, &old_frames);

	while (!skb_queue_empty(&old_frames)) {
		struct sk_buff_head current_sdu;
		u8 original_sar;

		/* Reassemble each SDU from one or more PDUs */

		iv.iov_base = buf;
		iv.iov_len = 0;

		skb = skb_peek(&old_frames);
		original_sar = bt_cb(skb)->control.sar;

		__skb_unlink(skb, &old_frames);

		/* Append data to SDU */
		/* Strip the L2CAP header so only payload is copied. */
		if (pi->extended_control)
			skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
		else
			skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

		/* Start frames additionally carry the 2-byte SDU length. */
		if (original_sar == L2CAP_SAR_START)
			skb_pull(skb, L2CAP_SDULEN_SIZE);

		err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

		/* Drop trailing FCS bytes that were copied with the data. */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
			iv.iov_len -= L2CAP_FCS_SIZE;

		/* Free skb */
		kfree_skb(skb);

		if (err)
			break;

		/* Gather the remaining continuation/end PDUs of this SDU. */
		while (!skb_queue_empty(&old_frames) && !err) {
			/* Check next frame */
			skb = skb_peek(&old_frames);

			/* A new SDU begins here; current one is complete. */
			if (is_initial_frame(bt_cb(skb)->control.sar))
				break;

			__skb_unlink(skb, &old_frames);

			/* Append data to SDU */
			if (pi->extended_control)
				skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
			else
				skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

			if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
				skb_pull(skb, L2CAP_SDULEN_SIZE);

			err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

			if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
				iv.iov_len -= L2CAP_FCS_SIZE;

			/* Free skb */
			kfree_skb(skb);
		}

		if (err)
			break;

		/* Segment data */

		__skb_queue_head_init(&current_sdu);

		/* skbs for the SDU were just freed, but the
		 * resegmenting process could produce more, smaller
		 * skbs due to smaller PDUs and reduced HCI MTU. The
		 * overhead from the sk_buff structs could put us over
		 * the sk_sndbuf limit.
		 *
		 * Since this code is running in response to a
		 * received poll/final packet, it cannot block.
		 * Therefore, memory allocation needs to be allowed by
		 * falling back to bt_skb_alloc() (with
		 * skb_set_owner_w() to maintain sk_wmem_alloc
		 * correctly).
		 */
		/* NOTE(review): msg_iovlen normally counts iovecs; here it
		 * carries the byte length, which l2cap_segment_sdu()
		 * appears to expect — confirm against its definition. */
		msg.msg_iovlen = iv.iov_len;
		err = l2cap_segment_sdu(sk, &current_sdu, &msg,
				msg.msg_iovlen, 1);

		if (err || skb_queue_empty(&current_sdu)) {
			BT_DBG("Error %d resegmenting data for socket %p",
					err, sk);
			__skb_queue_purge(&current_sdu);
			break;
		}

		/* Fix up first PDU SAR bits */
		if (!is_initial_frame(original_sar)) {
			BT_DBG("Changing SAR bits, %d PDUs",
					skb_queue_len(&current_sdu));
			skb = skb_peek(&current_sdu);

			if (skb_queue_len(&current_sdu) == 1) {
				/* Change SAR from 'unsegmented' to 'end' */
				bt_cb(skb)->control.sar = L2CAP_SAR_END;
			} else {
				struct l2cap_hdr *lh;
				size_t hdrlen;

				/* Change SAR from 'start' to 'continue' */
				bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;

				/* Start frames contain 2 bytes for
				 * sdulen and continue frames don't.
				 * Must rewrite header to eliminate
				 * sdulen and then adjust l2cap frame
				 * length.
				 */
				if (pi->extended_control)
					hdrlen = L2CAP_EXTENDED_HDR_SIZE;
				else
					hdrlen = L2CAP_ENHANCED_HDR_SIZE;

				memmove(skb->data + L2CAP_SDULEN_SIZE,
						skb->data, hdrlen);
				skb_pull(skb, L2CAP_SDULEN_SIZE);
				lh = (struct l2cap_hdr *)skb->data;
				lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
						L2CAP_SDULEN_SIZE);
			}
		}

		/* Add to queue */
		skb_queue_splice_tail(&current_sdu, queue);
	}

	__skb_queue_purge(&old_frames);
	if (err)
		__skb_queue_purge(queue);

	kfree(buf);

	BT_DBG("Queue resegmented, err=%d", err);
	return err;
}
2597
/* Workqueue handler that resegments a channel's TX queue after an AMP
 * move.  Owns the socket reference taken by l2cap_setup_resegment()
 * and drops it on every exit path.
 */
static void l2cap_resegment_worker(struct work_struct *work)
{
	int err = 0;
	struct l2cap_resegment_work *seg_work =
		container_of(work, struct l2cap_resegment_work, work);
	struct sock *sk = seg_work->sk;

	/* The work item is single-shot; free it as soon as sk is saved. */
	kfree(seg_work);

	BT_DBG("sk %p", sk);
	lock_sock(sk);

	/* Bail out if the move state changed before the work ran. */
	if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		release_sock(sk);
		sock_put(sk);
		return;
	}

	err = l2cap_resegment_queue(sk, TX_QUEUE(sk));

	l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;

	/* Resegmentation rebuilt the queue, so resync the send head. */
	if (skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));

	/* On failure the ERTM state is unrecoverable; tear down. */
	if (err)
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
	else
		l2cap_ertm_send(sk);

	release_sock(sk);
	sock_put(sk);
}
2633
2634static int l2cap_setup_resegment(struct sock *sk)
2635{
2636 struct l2cap_resegment_work *seg_work;
2637
2638 BT_DBG("sk %p", sk);
2639
2640 if (skb_queue_empty(TX_QUEUE(sk)))
2641 return 0;
2642
2643 seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
2644 if (!seg_work)
2645 return -ENOMEM;
2646
2647 INIT_WORK(&seg_work->work, l2cap_resegment_worker);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002648 sock_hold(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002649 seg_work->sk = sk;
2650
2651 if (!queue_work(_l2cap_wq, &seg_work->work)) {
2652 kfree(seg_work);
Mat Martineau2f0cd842011-10-20 14:34:26 -07002653 sock_put(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002654 return -ENOMEM;
2655 }
2656
2657 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
2658
2659 return 0;
2660}
2661
2662static inline int l2cap_rmem_available(struct sock *sk)
2663{
2664 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2665 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2666 return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
2667}
2668
2669static inline int l2cap_rmem_full(struct sock *sk)
2670{
2671 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2672 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2673 return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
2674}
2675
2676void l2cap_amp_move_init(struct sock *sk)
2677{
2678 BT_DBG("sk %p", sk);
2679
2680 if (!l2cap_pi(sk)->conn)
2681 return;
2682
2683 if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
2684 return;
2685
2686 if (l2cap_pi(sk)->amp_id == 0) {
2687 if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
2688 return;
2689 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2690 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
2691 amp_create_physical(l2cap_pi(sk)->conn, sk);
2692 } else {
2693 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2694 l2cap_pi(sk)->amp_move_state =
2695 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
2696 l2cap_pi(sk)->amp_move_id = 0;
2697 l2cap_amp_move_setup(sk);
2698 l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
2699 l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
2700 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
2701 }
2702}
2703
2704static void l2cap_chan_ready(struct sock *sk)
2705{
2706 struct sock *parent = bt_sk(sk)->parent;
2707
2708 BT_DBG("sk %p, parent %p", sk, parent);
2709
2710 l2cap_pi(sk)->conf_state = 0;
2711 l2cap_sock_clear_timer(sk);
2712
2713 if (!parent) {
2714 /* Outgoing channel.
2715 * Wake up socket sleeping on connect.
2716 */
2717 sk->sk_state = BT_CONNECTED;
2718 sk->sk_state_change(sk);
2719 } else {
2720 /* Incoming channel.
2721 * Wake up socket sleeping on accept.
2722 */
2723 parent->sk_data_ready(parent, 0);
2724 }
2725}
2726
Linus Torvalds1da177e2005-04-16 15:20:36 -07002727/* Copy frame to all raw sockets on that connection */
2728static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2729{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002730 struct l2cap_chan_list *l = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002731 struct sk_buff *nskb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002732 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002733
2734 BT_DBG("conn %p", conn);
2735
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002736 read_lock(&l->lock);
2737 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2738 if (sk->sk_type != SOCK_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002739 continue;
2740
2741 /* Don't send frame to the socket it came from */
2742 if (skb->sk == sk)
2743 continue;
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03002744 nskb = skb_clone(skb, GFP_ATOMIC);
2745 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002746 continue;
2747
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002748 if (sock_queue_rcv_skb(sk, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002749 kfree_skb(nskb);
2750 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002751 read_unlock(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002752}
2753
2754/* ---- L2CAP signalling commands ---- */
/* Build an skb carrying an L2CAP signalling command.
 *
 * @conn:  connection the command will be sent on (selects the LE or
 *         BR/EDR signalling CID and supplies the HCI ACL MTU).
 * @code:  signalling command code.
 * @ident: command identifier echoed by the remote's response.
 * @dlen:  length of @data in bytes.
 * @data:  command payload, copied into the skb.
 *
 * Payload larger than the ACL MTU is carried in continuation skbs
 * chained on the head skb's frag_list (no L2CAP header on fragments).
 * Returns the head skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;
	unsigned int mtu = conn->hcon->hdev->acl_mtu;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	/* Head skb is capped at the ACL MTU. */
	count = min_t(unsigned int, mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Remaining room in the head skb after the headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and any fragments already chained to it. */
	kfree_skb(skb);
	return NULL;
}
2818
2819static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2820{
2821 struct l2cap_conf_opt *opt = *ptr;
2822 int len;
2823
2824 len = L2CAP_CONF_OPT_SIZE + opt->len;
2825 *ptr += len;
2826
2827 *type = opt->type;
2828 *olen = opt->len;
2829
2830 switch (opt->len) {
2831 case 1:
2832 *val = *((u8 *) opt->val);
2833 break;
2834
2835 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002836 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002837 break;
2838
2839 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002840 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002841 break;
2842
2843 default:
2844 *val = (unsigned long) opt->val;
2845 break;
2846 }
2847
2848 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2849 return len;
2850}
2851
Linus Torvalds1da177e2005-04-16 15:20:36 -07002852static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2853{
2854 struct l2cap_conf_opt *opt = *ptr;
2855
2856 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2857
2858 opt->type = type;
2859 opt->len = len;
2860
2861 switch (len) {
2862 case 1:
2863 *((u8 *) opt->val) = val;
2864 break;
2865
2866 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002867 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002868 break;
2869
2870 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002871 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002872 break;
2873
2874 default:
2875 memcpy(opt->val, (void *) val, len);
2876 break;
2877 }
2878
2879 *ptr += L2CAP_CONF_OPT_SIZE + len;
2880}
2881
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002882static void l2cap_ertm_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002883{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002884 struct delayed_work *delayed =
2885 container_of(work, struct delayed_work, work);
2886 struct l2cap_pinfo *pi =
2887 container_of(delayed, struct l2cap_pinfo, ack_work);
2888 struct sock *sk = (struct sock *)pi;
2889 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002890
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002891 BT_DBG("sk %p", sk);
2892
2893 if (!sk)
2894 return;
2895
2896 lock_sock(sk);
2897
2898 if (!l2cap_pi(sk)->conn) {
2899 release_sock(sk);
2900 return;
2901 }
2902
2903 frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
2904 l2cap_pi(sk)->last_acked_seq,
2905 l2cap_pi(sk));
2906
2907 if (frames_to_ack)
2908 l2cap_ertm_send_rr_or_rnr(sk, 0);
2909
2910 release_sock(sk);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002911}
2912
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002913static void l2cap_ertm_retrans_timeout(struct work_struct *work)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002914{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002915 struct delayed_work *delayed =
2916 container_of(work, struct delayed_work, work);
2917 struct l2cap_pinfo *pi =
2918 container_of(delayed, struct l2cap_pinfo, retrans_work);
2919 struct sock *sk = (struct sock *)pi;
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03002920
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002921 BT_DBG("sk %p", sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002922
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002923 if (!sk)
2924 return;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002925
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002926 lock_sock(sk);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03002927
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002928 if (!l2cap_pi(sk)->conn) {
2929 release_sock(sk);
2930 return;
2931 }
Gustavo F. Padovan39d5a3e2011-04-04 15:40:12 -03002932
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002933 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
2934 release_sock(sk);
2935}
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03002936
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002937static void l2cap_ertm_monitor_timeout(struct work_struct *work)
2938{
2939 struct delayed_work *delayed =
2940 container_of(work, struct delayed_work, work);
2941 struct l2cap_pinfo *pi =
2942 container_of(delayed, struct l2cap_pinfo, monitor_work);
2943 struct sock *sk = (struct sock *)pi;
2944
2945 BT_DBG("sk %p", sk);
2946
2947 if (!sk)
2948 return;
2949
2950 lock_sock(sk);
2951
2952 if (!l2cap_pi(sk)->conn) {
2953 release_sock(sk);
2954 return;
2955 }
2956
2957 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
2958
2959 release_sock(sk);
2960}
2961
2962static inline void l2cap_ertm_init(struct sock *sk)
2963{
2964 l2cap_pi(sk)->next_tx_seq = 0;
2965 l2cap_pi(sk)->expected_tx_seq = 0;
2966 l2cap_pi(sk)->expected_ack_seq = 0;
2967 l2cap_pi(sk)->unacked_frames = 0;
2968 l2cap_pi(sk)->buffer_seq = 0;
2969 l2cap_pi(sk)->frames_sent = 0;
2970 l2cap_pi(sk)->last_acked_seq = 0;
2971 l2cap_pi(sk)->sdu = NULL;
2972 l2cap_pi(sk)->sdu_last_frag = NULL;
2973 l2cap_pi(sk)->sdu_len = 0;
2974 atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
2975
2976 l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2977 l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2978
2979 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
2980 l2cap_pi(sk)->rx_state);
2981
2982 l2cap_pi(sk)->amp_id = 0;
2983 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
2984 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
2985 l2cap_pi(sk)->amp_move_reqseq = 0;
2986 l2cap_pi(sk)->amp_move_event = 0;
2987
2988 INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
2989 INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
2990 l2cap_ertm_retrans_timeout);
2991 INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
2992 l2cap_ertm_monitor_timeout);
2993 INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
2994 skb_queue_head_init(SREJ_QUEUE(sk));
2995 skb_queue_head_init(TX_QUEUE(sk));
2996
2997 l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
2998 l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
2999 l2cap_pi(sk)->remote_tx_win);
3000}
3001
3002void l2cap_ertm_destruct(struct sock *sk)
3003{
3004 l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
3005 l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
3006}
3007
/* Cancel every pending ERTM timer for the channel. */
void l2cap_ertm_shutdown(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);
}
3014
3015void l2cap_ertm_recv_done(struct sock *sk)
3016{
3017 lock_sock(sk);
3018
3019 if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) {
3020 release_sock(sk);
3021 return;
3022 }
3023
3024 /* Consume any queued incoming frames and update local busy status */
3025 if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
3026 l2cap_ertm_rx_queued_iframes(sk))
3027 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
3028 else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3029 l2cap_rmem_available(sk))
3030 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
3031
3032 release_sock(sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03003033}
3034
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003035static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
3036{
3037 switch (mode) {
3038 case L2CAP_MODE_STREAMING:
3039 case L2CAP_MODE_ERTM:
3040 if (l2cap_mode_supported(mode, remote_feat_mask))
3041 return mode;
3042 /* fall through */
3043 default:
3044 return L2CAP_MODE_BASIC;
3045 }
3046}
3047
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003048static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003050 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
3051 (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
3052 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3053 pi->extended_control = 1;
3054 } else {
3055 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3056 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3057
3058 pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
3059 pi->extended_control = 0;
3060 }
3061}
3062
/* Combine the data rate of an additional flow spec @new into the
 * channel's current flow spec @cur, writing the result to @agg.
 * 0xFFFF max_sdu / 0xFFFFFFFF sdu_arr_time mean "unknown rate".
 * If either side is unknown the aggregate is marked unknown; otherwise
 * the two rates (bytes/sec) are summed and agg->sdu_arr_time is
 * recomputed.  NOTE(review): agg->max_sdu stays at cur->max_sdu in the
 * known-rate case — presumably intentional, confirm against the AMP
 * flow-spec aggregation rules.
 */
static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
			struct hci_ext_fs *new,
			struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		/* current flow spec has known rate */
		if ((new->max_sdu == 0xFFFF) ||
				(new->sdu_arr_time == 0xFFFFFFFF)) {
			/* new fs has unknown rate, so aggregate is unknown */
			agg->max_sdu = 0xFFFF;
			agg->sdu_arr_time = 0xFFFFFFFF;
		} else {
			/* new fs has known rate, so aggregate is known */
			u64 cur_rate;
			u64 new_rate;
			/* Rates in bytes/second: max_sdu bytes every
			 * sdu_arr_time microseconds. */
			cur_rate = cur->max_sdu * 1000000ULL;
			if (cur->sdu_arr_time)
				cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
			new_rate = new->max_sdu * 1000000ULL;
			if (new->sdu_arr_time)
				new_rate = div_u64(new_rate, new->sdu_arr_time);
			cur_rate = cur_rate + new_rate;
			/* Guard against divide-by-zero when the summed
			 * rate is zero (sdu_arr_time keeps cur's value). */
			if (cur_rate)
				agg->sdu_arr_time = div64_u64(
					agg->max_sdu * 1000000ULL, cur_rate);
		}
	}
}
3092
3093static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3094{
3095 struct hci_ext_fs tx_fs;
3096 struct hci_ext_fs rx_fs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003097
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003098 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003099
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003100 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3101 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3102 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3103 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3104 return 0;
3105
3106 l2cap_aggregate_fs(&chan->tx_fs,
3107 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3108 l2cap_aggregate_fs(&chan->rx_fs,
3109 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3110 hci_chan_modify(chan, &tx_fs, &rx_fs);
3111 return 1;
3112}
3113
/* Remove the data rate of a departing flow spec @old from the
 * channel's current flow spec @cur, writing the result to @agg.
 * Inverse of l2cap_aggregate_fs(); rates are in bytes/second.
 * NOTE(review): there is no guard against old's rate exceeding cur's —
 * the u64 subtraction would wrap.  Callers presumably only remove flow
 * specs previously aggregated in; confirm.
 */
static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
			struct hci_ext_fs *old,
			struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		u64 cur_rate;
		u64 old_rate;
		cur_rate = cur->max_sdu * 1000000ULL;
		if (cur->sdu_arr_time)
			cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
		old_rate = old->max_sdu * 1000000ULL;
		if (old->sdu_arr_time)
			old_rate = div_u64(old_rate, old->sdu_arr_time);
		cur_rate = cur_rate - old_rate;
		/* Guard against divide-by-zero when the remaining rate
		 * is zero (sdu_arr_time keeps cur's value). */
		if (cur_rate)
			agg->sdu_arr_time = div64_u64(
				agg->max_sdu * 1000000ULL, cur_rate);
	}
}
3134
3135static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3136{
3137 struct hci_ext_fs tx_fs;
3138 struct hci_ext_fs rx_fs;
3139
3140 BT_DBG("chan %p", chan);
3141
3142 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3143 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3144 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3145 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3146 return 0;
3147
3148 l2cap_deaggregate_fs(&chan->tx_fs,
3149 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3150 l2cap_deaggregate_fs(&chan->rx_fs,
3151 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3152 hci_chan_modify(chan, &tx_fs, &rx_fs);
3153 return 1;
3154}
3155
/* Obtain an HCI logical channel on AMP controller @amp_id for L2CAP
 * channel @pi.  If a logical channel already exists for the physical
 * link, its flow specs are re-aggregated with this channel's and a
 * reference is taken; otherwise a new logical channel is accepted
 * (incoming) or created (outgoing).  Returns the channel or NULL.
 */
static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct l2cap_pinfo *pi)
{
	struct hci_dev *hdev;
	struct hci_conn *hcon;
	struct hci_chan *chan;

	/* hci_dev_get() takes a device reference, dropped at 'done'. */
	hdev = hci_dev_get(amp_id);
	if (!hdev)
		return NULL;

	BT_DBG("hdev %s", hdev->name);

	hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
	if (!hcon) {
		chan = NULL;
		goto done;
	}

	chan = hci_chan_list_lookup_id(hdev, hcon->handle);
	if (chan) {
		/* Existing logical channel: fold this L2CAP channel's
		 * flow spec into it and take a reference. */
		l2cap_aggregate(chan, pi);
		hci_chan_hold(chan);
		goto done;
	}

	if (bt_sk(pi)->parent) {
		/* Incoming connection */
		chan = hci_chan_accept(hcon,
			(struct hci_ext_fs *) &pi->local_fs,
			(struct hci_ext_fs *) &pi->remote_fs);
	} else {
		/* Outgoing connection */
		chan = hci_chan_create(hcon,
			(struct hci_ext_fs *) &pi->local_fs,
			(struct hci_ext_fs *) &pi->remote_fs);
	}
done:
	hci_dev_put(hdev);
	return chan;
}
3196
/* Build an L2CAP configuration request for channel @sk into @data.
 * Emits MTU, RFC (mode parameters), and — depending on mode and peer
 * features — extended window size, extended flow spec, and FCS
 * options.  Returns the number of bytes written.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode is only (re)negotiated on the first config exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode pinned by device policy: do not downgrade. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	/* MTU option is only sent when it differs from the default. */
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* An all-zero RFC option is only sent when the peer
		 * understands RFC at all (supports ERTM or streaming). */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		l2cap_setup_txwin(pi);
		/* The RFC option's window field is limited to the
		 * enhanced maximum; larger windows go in the extended
		 * window option below. */
		if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
			rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
		else
			rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->tx_win);
		}

		if (pi->amp_id) {
			/* default best effort extended flow spec */
			struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to disable FCS when unwanted locally or by peer. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3314
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003315
/* Build a configuration request used to reconfigure an ERTM channel
 * after an AMP move.  Timeouts are scaled from the controller's
 * best-effort flush timeout when moving to an AMP (amp_move_id set),
 * or reset to defaults when moving back to BR/EDR.  Only ERTM mode is
 * supported; returns bytes written or -ECONNREFUSED for other modes.
 */
static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;
	u32 be_flush_to;

	BT_DBG("sk %p", sk);

	/* convert to milliseconds, round up */
	be_flush_to = (pi->conn->hcon->hdev->amp_be_flush_to + 999) / 1000;

	switch (pi->mode) {
	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		if (pi->amp_move_id) {
			/* Moving to AMP: base timeouts on three flush
			 * periods plus margin. */
			rfc.retrans_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
			rfc.monitor_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
		} else {
			/* Moving back to BR/EDR: default timeouts. */
			rfc.retrans_timeout =
					cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
					cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		}
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		break;

	default:
		return -ECONNREFUSED;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {

		/* TODO assign fcs for br/edr based on socket config option */
		/* No FCS on AMP (controller CRC covers it); CRC16 on
		 * BR/EDR. */
		if (pi->amp_move_id)
			pi->local_conf.fcs = L2CAP_FCS_NONE;
		else
			pi->local_conf.fcs = L2CAP_FCS_CRC16;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						pi->local_conf.fcs);

		pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3377
/*
 * Parse the peer's Configure Request (previously accumulated in
 * pi->conf_req / pi->conf_len) and build our Configure Response in @data.
 *
 * Returns the length in bytes of the response written to @data, or
 * -ECONNREFUSED when no acceptable channel mode can be agreed on, or when
 * lockstep configuration requirements are not met.
 */
static int l2cap_parse_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_ext_fs fs;
	u16 mtu = L2CAP_DEFAULT_MTU;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* Walk the option list; l2cap_get_conf_opt advances req and
	 * returns the size consumed. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		/* Hint options may be ignored if unknown; non-hint unknown
		 * options force a CONF_UNKNOWN response (see default case). */
		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			/* NOTE(review): flush_to is stored even when the
			 * option is then rejected on a lockstep channel --
			 * confirm this unconditional assignment is intended. */
			pi->flush_to = val;
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				result = L2CAP_CONF_UNACCEPT;
			else
				pi->remote_conf.flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is not accepted on lockstep channels
			 * (the extended flow spec is used instead). */
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			break;

		case L2CAP_CONF_FCS:
			if (val == L2CAP_FCS_NONE)
				pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
			pi->remote_conf.fcs = val;
			break;

		case L2CAP_CONF_EXT_FS:
			/* Extended flow spec: only valid in lockstep mode
			 * and only for best-effort service. */
			if (olen == sizeof(fs)) {
				pi->conf_state |= L2CAP_CONF_EFS_RECV;
				if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
					result = L2CAP_CONF_UNACCEPT;
					break;
				}
				memcpy(&fs, (void *) val, olen);
				if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
					result = L2CAP_CONF_FLOW_SPEC_REJECT;
					break;
				}
				pi->remote_conf.flush_to =
						le32_to_cpu(fs.flush_to);
				pi->remote_fs.id = fs.id;
				pi->remote_fs.type = fs.type;
				pi->remote_fs.max_sdu =
						le16_to_cpu(fs.max_sdu);
				pi->remote_fs.sdu_arr_time =
						le32_to_cpu(fs.sdu_arr_time);
				pi->remote_fs.acc_latency =
						le32_to_cpu(fs.acc_latency);
				pi->remote_fs.flush_to =
						le32_to_cpu(fs.flush_to);
			}
			break;

		case L2CAP_CONF_EXT_WINDOW:
			/* Peer supports the extended control field; widen
			 * the tx window limit accordingly. */
			pi->extended_control = 1;
			pi->remote_tx_win = val;
			pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
			pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown non-hint option types back in the
			 * response body. */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	/* Only (re)select the channel mode on the first config exchange. */
	if (pi->num_conf_rsp || pi->num_conf_req > 1)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
			/* Mode is negotiable: pick the best common mode. */
			pi->mode = l2cap_select_mode(rfc.mode,
					pi->conn->feat_mask);
			break;
		}

		/* STATE2 device: our mode is mandatory. */
		if (pi->mode != rfc.mode)
			return -ECONNREFUSED;

		break;
	}

done:
	if (pi->mode != rfc.mode) {
		/* Propose our mode back to the peer; give up after the
		 * second failed exchange. */
		result = L2CAP_CONF_UNACCEPT;
		rfc.mode = pi->mode;

		if (pi->num_conf_rsp == 1)
			return -ECONNREFUSED;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
	}


	/* Lockstep configuration requires the peer to have sent an
	 * extended flow spec. */
	if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
			!(pi->conf_state & L2CAP_CONF_EFS_RECV))
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		if (mtu < L2CAP_DEFAULT_MIN_MTU) {
			result = L2CAP_CONF_UNACCEPT;
			pi->omtu = L2CAP_DEFAULT_MIN_MTU;
		}
		else {
			pi->omtu = mtu;
			pi->conf_state |= L2CAP_CONF_MTU_DONE;
		}
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		switch (rfc.mode) {
		case L2CAP_MODE_BASIC:
			/* Basic mode never uses an FCS. */
			pi->fcs = L2CAP_FCS_NONE;
			pi->conf_state |= L2CAP_CONF_MODE_DONE;
			break;

		case L2CAP_MODE_ERTM:
			/* An explicit EXT_WINDOW option overrides the
			 * txwin carried in the RFC option. */
			if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
				pi->remote_tx_win = rfc.txwin_size;

			pi->remote_max_tx = rfc.max_transmit;

			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			/* Timeouts in our response are informational per
			 * the spec; send the defaults. */
			rfc.retrans_timeout =
				cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
				cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			/* Lockstep responses echo the received flow spec. */
			if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
				l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);

			break;

		case L2CAP_MODE_STREAMING:
			pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);

			pi->conf_state |= L2CAP_CONF_MODE_DONE;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

			break;

		default:
			result = L2CAP_CONF_UNACCEPT;

			memset(&rfc, 0, sizeof(rfc));
			rfc.mode = pi->mode;
		}

		/* In lockstep mode the first successful response is sent
		 * as CONF_PENDING; kick off AMP logical link creation if
		 * this channel lives on an AMP controller. */
		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
			pi->conf_state |= L2CAP_CONF_PEND_SENT;
			result = L2CAP_CONF_PENDING;

			if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
					pi->amp_id) {
				struct hci_chan *chan;
				/* Trigger logical link creation only on AMP */

				chan = l2cap_chan_admit(pi->amp_id, pi);
				if (!chan)
					return -ECONNREFUSED;

				chan->l2cap_sk = sk;
				if (chan->state == BT_CONNECTED)
					l2cap_create_cfm(chan, 0);
			}
		}

		if (result == L2CAP_CONF_SUCCESS)
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
	}
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3598
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003599static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003600{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003601 struct l2cap_pinfo *pi = l2cap_pi(sk);
3602 struct l2cap_conf_rsp *rsp = data;
3603 void *ptr = rsp->data;
3604 void *req = pi->conf_req;
3605 int len = pi->conf_len;
3606 int type, hint, olen;
3607 unsigned long val;
3608 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3609 struct l2cap_conf_ext_fs fs;
3610 u16 mtu = pi->omtu;
3611 u16 tx_win = pi->remote_tx_win;
3612 u16 result = L2CAP_CONF_SUCCESS;
3613
3614 BT_DBG("sk %p", sk);
3615
3616 while (len >= L2CAP_CONF_OPT_SIZE) {
3617 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3618
3619 hint = type & L2CAP_CONF_HINT;
3620 type &= L2CAP_CONF_MASK;
3621
3622 switch (type) {
3623 case L2CAP_CONF_MTU:
3624 mtu = val;
3625 break;
3626
3627 case L2CAP_CONF_FLUSH_TO:
3628 if (pi->amp_move_id)
3629 result = L2CAP_CONF_UNACCEPT;
3630 else
3631 pi->remote_conf.flush_to = val;
3632 break;
3633
3634 case L2CAP_CONF_QOS:
3635 if (pi->amp_move_id)
3636 result = L2CAP_CONF_UNACCEPT;
3637 break;
3638
3639 case L2CAP_CONF_RFC:
3640 if (olen == sizeof(rfc))
3641 memcpy(&rfc, (void *) val, olen);
3642 if (pi->mode != rfc.mode ||
3643 rfc.mode == L2CAP_MODE_BASIC)
3644 result = L2CAP_CONF_UNACCEPT;
3645 break;
3646
3647 case L2CAP_CONF_FCS:
3648 pi->remote_conf.fcs = val;
3649 break;
3650
3651 case L2CAP_CONF_EXT_FS:
3652 if (olen == sizeof(fs)) {
3653 memcpy(&fs, (void *) val, olen);
3654 if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
3655 result = L2CAP_CONF_FLOW_SPEC_REJECT;
3656 else {
3657 pi->remote_conf.flush_to =
3658 le32_to_cpu(fs.flush_to);
3659 }
3660 }
3661 break;
3662
3663 case L2CAP_CONF_EXT_WINDOW:
3664 tx_win = val;
3665 break;
3666
3667 default:
3668 if (hint)
3669 break;
3670
3671 result = L2CAP_CONF_UNKNOWN;
3672 *((u8 *) ptr++) = type;
3673 break;
3674 }
3675 }
3676
3677 BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
3678 result, pi->mode, rfc.mode);
3679
3680 if (result == L2CAP_CONF_SUCCESS) {
3681 /* Configure output options and let the other side know
3682 * which ones we don't like. */
3683
3684 /* Don't allow mtu to decrease. */
3685 if (mtu < pi->omtu)
3686 result = L2CAP_CONF_UNACCEPT;
3687
3688 BT_DBG("mtu %d omtu %d", mtu, pi->omtu);
3689
3690 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
3691
3692 /* Don't allow extended transmit window to change. */
3693 if (tx_win != pi->remote_tx_win) {
3694 result = L2CAP_CONF_UNACCEPT;
3695 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
3696 pi->remote_tx_win);
3697 }
3698
3699 if (rfc.mode == L2CAP_MODE_ERTM) {
3700 pi->remote_conf.retrans_timeout =
3701 le16_to_cpu(rfc.retrans_timeout);
3702 pi->remote_conf.monitor_timeout =
3703 le16_to_cpu(rfc.monitor_timeout);
3704
3705 BT_DBG("remote conf monitor timeout %d",
3706 pi->remote_conf.monitor_timeout);
3707
3708 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3709 sizeof(rfc), (unsigned long) &rfc);
3710 }
3711
3712 }
3713
3714 if (result != L2CAP_CONF_SUCCESS)
3715 goto done;
3716
3717 pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs ;
3718
3719 if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG) {
3720 pi->flush_to = pi->remote_conf.flush_to;
3721 pi->retrans_timeout = pi->remote_conf.retrans_timeout;
3722
3723 if (pi->amp_move_id)
3724 pi->monitor_timeout = pi->remote_conf.monitor_timeout;
3725 else
3726 pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
3727 BT_DBG("mode %d monitor timeout %d",
3728 pi->mode, pi->monitor_timeout);
3729
3730 }
3731
3732done:
3733 rsp->scid = cpu_to_le16(pi->dcid);
3734 rsp->result = cpu_to_le16(result);
3735 rsp->flags = cpu_to_le16(0x0000);
3736
3737 return ptr - data;
3738}
3739
/*
 * Parse the peer's Configure Response (@rsp, @len) and build the follow-up
 * Configure Request in @data.  @result carries the peer's result code in
 * and may be downgraded to L2CAP_CONF_UNACCEPT (e.g. MTU below minimum).
 *
 * Returns the length in bytes of the new request written to @data, or
 * -ECONNREFUSED if the peer insists on an unacceptable channel mode.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	int type, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	/* Initialize rfc in case no rfc option is received */
	rfc.mode = pi->mode;
	rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
	rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
	rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		switch (type) {
		case L2CAP_CONF_MTU:
			/* Clamp the incoming MTU to the spec minimum and
			 * flag the response unacceptable if it was lower. */
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				pi->imtu = L2CAP_DEFAULT_MIN_MTU;
			} else
				pi->imtu = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
			break;

		case L2CAP_CONF_FLUSH_TO:
			pi->flush_to = val;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
							2, pi->flush_to);
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* STATE2 device: our configured mode is mandatory;
			 * refuse if the peer proposes anything else. */
			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
							rfc.mode != pi->mode)
				return -ECONNREFUSED;

			pi->fcs = 0;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
			break;

		case L2CAP_CONF_EXT_WINDOW:
			/* Cap our tx window at the enhanced-mode maximum. */
			pi->tx_win = val;

			if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
				pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
					2, pi->tx_win);
			break;

		default:
			break;
		}
	}

	/* A basic-mode channel cannot be switched to another mode by the
	 * peer's response. */
	if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
		return -ECONNREFUSED;

	pi->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		/* Adopt the peer's ERTM/streaming parameters. */
		switch (rfc.mode) {
		case L2CAP_MODE_ERTM:
			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			pi->mps    = le16_to_cpu(rfc.max_pdu_size);
			break;
		case L2CAP_MODE_STREAMING:
			pi->mps    = le16_to_cpu(rfc.max_pdu_size);
		}
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3827
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003828static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003829{
3830 struct l2cap_conf_rsp *rsp = data;
3831 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003832
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003833 BT_DBG("sk %p", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003834
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003835 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003836 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003837 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003838
3839 return ptr - data;
3840}
3841
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003842static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003843{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003844 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003845 int type, olen;
3846 unsigned long val;
3847 struct l2cap_conf_rfc rfc;
3848
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003849 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003850
Mat Martineau6b0bcce2011-11-29 16:17:02 -08003851 /* Initialize rfc in case no rfc option is received */
3852 rfc.mode = pi->mode;
Mat Martineauab043552011-12-05 15:54:44 -08003853 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
3854 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
3855 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
Mat Martineau6b0bcce2011-11-29 16:17:02 -08003856
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003857 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003858 return;
3859
3860 while (len >= L2CAP_CONF_OPT_SIZE) {
3861 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3862
3863 switch (type) {
3864 case L2CAP_CONF_RFC:
3865 if (olen == sizeof(rfc))
3866 memcpy(&rfc, (void *)val, olen);
3867 goto done;
3868 }
3869 }
3870
3871done:
3872 switch (rfc.mode) {
3873 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003874 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3875 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3876 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003877 break;
3878 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003879 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003880 }
3881}
3882
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003883static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
3884{
3885 struct l2cap_pinfo *pi = l2cap_pi(sk);
3886 int type, olen;
3887 unsigned long val;
3888 struct l2cap_conf_ext_fs fs;
3889
3890 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
3891
3892 while (len >= L2CAP_CONF_OPT_SIZE) {
3893 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3894 if ((type == L2CAP_CONF_EXT_FS) &&
3895 (olen == sizeof(struct l2cap_conf_ext_fs))) {
3896 memcpy(&fs, (void *)val, olen);
3897 pi->local_fs.id = fs.id;
3898 pi->local_fs.type = fs.type;
3899 pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
3900 pi->local_fs.sdu_arr_time =
3901 le32_to_cpu(fs.sdu_arr_time);
3902 pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
3903 pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
3904 break;
3905 }
3906 }
3907
3908}
3909
3910static int l2cap_finish_amp_move(struct sock *sk)
3911{
3912 struct l2cap_pinfo *pi;
3913 int err;
3914
3915 BT_DBG("sk %p", sk);
3916
3917 pi = l2cap_pi(sk);
3918
3919 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
3920 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
3921
3922 if (pi->ampcon)
3923 pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
3924 else
3925 pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
3926
3927 err = l2cap_setup_resegment(sk);
3928
3929 return err;
3930}
3931
/*
 * Handle the Configure Response received during AMP-move reconfiguration.
 * On success, validates any RFC option in the response, stops all ERTM
 * timers, and either answers the peer's poll (acceptor role) or sends our
 * own poll and waits for the final bit (initiator role).
 *
 * Returns 0 on success or a negative errno.
 */
static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
					u16 result)
{
	int err = 0;
	struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);

	/* A response is only meaningful while a reconfiguration is
	 * actually in progress. */
	if (pi->reconf_state == L2CAP_RECONF_NONE)
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		while (len >= L2CAP_CONF_OPT_SIZE) {
			int type, olen;
			unsigned long val;

			len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

			if (type == L2CAP_CONF_RFC) {
				/* Only the current mode or ERTM is valid
				 * in the response. */
				if (olen == sizeof(rfc))
					memcpy(&rfc, (void *)val, olen);
				if (rfc.mode != pi->mode &&
					rfc.mode != L2CAP_MODE_ERTM) {
					err = -ECONNREFUSED;
					goto done;
				}
				break;
			}
		}
	}

done:
	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);

	if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;

		/* Respond to poll */
		/* NOTE(review): this overwrites err even when the RFC
		 * check above set -ECONNREFUSED -- confirm intended. */
		err = l2cap_answer_move_poll(sk);

	} else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {

		/* If moving to BR/EDR, use default timeout defined by
		 * the spec */
		if (pi->amp_move_id == 0)
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

		if (pi->mode == L2CAP_MODE_ERTM) {
			/* Poll the peer and wait for a frame with the
			 * final bit set before resuming. */
			l2cap_ertm_tx(sk, NULL, NULL,
					L2CAP_ERTM_EVENT_EXPLICIT_POLL);
			pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
		}
	}

	return err;
}
3991
3992
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003993static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3994{
3995 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
3996
3997 if (rej->reason != 0x0000)
3998 return 0;
3999
4000 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
4001 cmd->ident == conn->info_ident) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004002 del_timer(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01004003
4004 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004005 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01004006
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02004007 l2cap_conn_start(conn);
4008 }
4009
4010 return 0;
4011}
4012
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004013static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
4014 struct l2cap_cmd_hdr *cmd,
4015 u8 *data, u8 rsp_code,
4016 u8 amp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004018 struct l2cap_chan_list *list = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
4020 struct l2cap_conn_rsp rsp;
Nathan Holsteind793fe82010-10-15 11:54:02 -04004021 struct sock *parent, *sk = NULL;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02004022 int result, status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004023
4024 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02004025 __le16 psm = req->psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004026
4027 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
4028
4029 /* Check if we have socket listening on psm */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004030 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
4031 if (!parent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07004032 result = L2CAP_CR_BAD_PSM;
4033 goto sendresp;
4034 }
4035
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00004036 bh_lock_sock(parent);
4037
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02004038 /* Check if the ACL is secure enough (if not SDP) */
4039 if (psm != cpu_to_le16(0x0001) &&
4040 !hci_conn_check_link_mode(conn->hcon)) {
Marcel Holtmann2950f212009-02-12 14:02:50 +01004041 conn->disc_reason = 0x05;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02004042 result = L2CAP_CR_SEC_BLOCK;
4043 goto response;
4044 }
4045
Linus Torvalds1da177e2005-04-16 15:20:36 -07004046 result = L2CAP_CR_NO_MEM;
4047
4048 /* Check for backlog size */
4049 if (sk_acceptq_is_full(parent)) {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004050 BT_DBG("backlog full %d", parent->sk_ack_backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004051 goto response;
4052 }
4053
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004054 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
4055 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004056 goto response;
4057
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004058 write_lock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059
4060 /* Check if we already have channel with that dcid */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004061 if (__l2cap_get_chan_by_dcid(list, scid)) {
4062 write_unlock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004063 sock_set_flag(sk, SOCK_ZAPPED);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004064 l2cap_sock_kill(sk);
4065 sk = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004066 goto response;
4067 }
4068
4069 hci_conn_hold(conn->hcon);
4070
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004071 l2cap_sock_init(sk, parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004072 bacpy(&bt_sk(sk)->src, conn->src);
4073 bacpy(&bt_sk(sk)->dst, conn->dst);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004074 l2cap_pi(sk)->psm = psm;
4075 l2cap_pi(sk)->dcid = scid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004076
Gustavo F. Padovand1010242011-03-25 00:39:48 -03004077 bt_accept_enqueue(parent, sk);
4078
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004079 __l2cap_chan_add(conn, sk);
4080 dcid = l2cap_pi(sk)->scid;
4081 l2cap_pi(sk)->amp_id = amp_id;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004082
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004083 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004084
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004085 l2cap_pi(sk)->ident = cmd->ident;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004086
Marcel Holtmann984947d2009-02-06 23:35:19 +01004087 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004088 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004089 if (bt_sk(sk)->defer_setup) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004090 sk->sk_state = BT_CONNECT2;
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004091 result = L2CAP_CR_PEND;
4092 status = L2CAP_CS_AUTHOR_PEND;
4093 parent->sk_data_ready(parent, 0);
4094 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004095 /* Force pending result for AMP controllers.
4096 * The connection will succeed after the
4097 * physical link is up. */
4098 if (amp_id) {
4099 sk->sk_state = BT_CONNECT2;
4100 result = L2CAP_CR_PEND;
4101 } else {
4102 sk->sk_state = BT_CONFIG;
4103 result = L2CAP_CR_SUCCESS;
4104 }
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004105 status = L2CAP_CS_NO_INFO;
4106 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004107 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004108 sk->sk_state = BT_CONNECT2;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004109 result = L2CAP_CR_PEND;
4110 status = L2CAP_CS_AUTHEN_PEND;
4111 }
4112 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004113 sk->sk_state = BT_CONNECT2;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004114 result = L2CAP_CR_PEND;
4115 status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004116 }
4117
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004118 write_unlock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004119
4120response:
4121 bh_unlock_sock(parent);
4122
4123sendresp:
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004124 rsp.scid = cpu_to_le16(scid);
4125 rsp.dcid = cpu_to_le16(dcid);
4126 rsp.result = cpu_to_le16(result);
4127 rsp.status = cpu_to_le16(status);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004128 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004129
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004130 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004131 struct l2cap_info_req info;
4132 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4133
4134 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4135 conn->info_ident = l2cap_get_ident(conn);
4136
4137 mod_timer(&conn->info_timer, jiffies +
4138 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
4139
4140 l2cap_send_cmd(conn, conn->info_ident,
4141 L2CAP_INFO_REQ, sizeof(info), &info);
4142 }
4143
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004144 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004145 result == L2CAP_CR_SUCCESS) {
4146 u8 buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004147 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004148 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004149 l2cap_build_conf_req(sk, buf), buf);
4150 l2cap_pi(sk)->num_conf_req++;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004151 }
4152
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004153 return sk;
4154}
4155
/*
 * Handle a BR/EDR Connection Request: delegate to l2cap_create_connect()
 * with a standard L2CAP_CONN_RSP response code and no AMP controller
 * (amp_id 0).  Always returns 0.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
4162
/* Handle an incoming L2CAP Connect Response.
 *
 * Locates the local channel either by the responder-assigned scid or, when
 * scid is 0 (connection refused before a CID was assigned), by the signalling
 * identifier of the original request.  On success the channel enters BT_CONFIG
 * and the first Configure Request is sent (unless one is already outstanding);
 * on "pending" only the CONNECT_PEND flag is set; any other result tears the
 * channel down.
 *
 * NOTE(review): the lookup helpers appear to return the socket bh-locked —
 * the matching bh_unlock_sock() is at the end of this function; confirm
 * against l2cap_get_chan_by_scid()/_by_ident().
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];	/* scratch buffer for the outgoing Configure Request */

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		/* Refusal may arrive with scid 0; match on request ident. */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Avoid sending a duplicate Configure Request. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			/* Defer teardown via a short timer instead. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
4224
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004225static inline void set_default_fcs(struct l2cap_pinfo *pi)
Mat Martineau8c462b62010-08-24 15:35:42 -07004226{
4227 /* FCS is enabled only in ERTM or streaming mode, if one or both
4228 * sides request it.
4229 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004230 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
4231 pi->fcs = L2CAP_FCS_NONE;
4232 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
4233 pi->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07004234}
4235
/* Handle an incoming L2CAP Configure Request.
 *
 * Accumulates (possibly fragmented) config options into the per-channel
 * conf_req buffer; once the final fragment arrives (continuation flag clear)
 * the options are parsed and a Configure Response is sent.  Also recognises
 * the reconfiguration exchange that follows an AMP channel move, and the
 * "lockstep" configuration used for AMP channels (pending response followed
 * immediately by success on BR/EDR).
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rspbuf[64];
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
	struct sock *sk;
	int len;
	u8 amp_move_reconf = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
		"reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
		sk->sk_state, l2cap_pi(sk)->rx_state,
		l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
		l2cap_pi(sk)->amp_move_id);

	/* Detect a reconfig request due to channel move between
	 * BR/EDR and AMP
	 */
	if (sk->sk_state == BT_CONNECTED &&
		l2cap_pi(sk)->rx_state ==
			L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;

	if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
		amp_move_reconf = 1;

	/* Config is only legal in BT_CONFIG, or as a post-move reconfig. */
	if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
		struct l2cap_cmd_rej rej;

		/* 0x0002 = invalid CID in request (L2CAP reject reason) */
		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_REJECT, flags), rspbuf);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
		goto unlock;
	}

	/* Complete config. */
	if (!amp_move_reconf)
		len = l2cap_parse_conf_req(sk, rspbuf);
	else
		len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);

	/* Parse failure: unrecoverable, drop the channel. */
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_pi(sk)->conf_ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
			rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
			!l2cap_pi(sk)->amp_id) {
		/* Send success response right after pending if using
		 * lockstep config on BR/EDR
		 */
		rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
	}

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	/* Post-move reconfig does not re-run the completion logic below. */
	if (amp_move_reconf)
		goto unlock;

	l2cap_pi(sk)->num_conf_rsp++;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: channel becomes operational. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
			l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Our own Configure Request has not gone out yet; send it now. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
4363
/* Handle an incoming L2CAP Configure Response.
 *
 * Drives the local side of the configuration handshake: success stores the
 * negotiated RFC options; "pending" is only legal during lockstep (AMP)
 * configuration and may trigger logical-link setup; "unaccepted" renegotiates
 * with a fresh Configure Request (bounded by L2CAP_CONF_MAX_CONF_RSP); any
 * other result aborts the channel.  When both directions are done the channel
 * goes to BT_CONNECTED.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	int len = cmd->len - sizeof(*rsp);	/* length of the option payload */

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	pi = l2cap_pi(sk);

	/* Response to a post-move reconfiguration is handled separately. */
	if (pi->reconf_state != L2CAP_RECONF_NONE) {
		l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
		goto done;
	}

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
			/* Lockstep procedure requires a pending response
			 * before success.
			 */
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_PENDING:
		/* Pending is only valid in lockstep configuration. */
		if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);

		pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;

		l2cap_conf_ext_fs_get(sk, rsp->data, len);

		if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
			struct hci_chan *chan;

			/* Already sent a 'pending' response, so set up
			 * the logical link now
			 */
			chan = l2cap_chan_admit(pi->amp_id, pi);
			if (!chan) {
				l2cap_send_disconn_req(pi->conn, sk,
							ECONNRESET);
				goto done;
			}

			chan->l2cap_sk = sk;
			if (chan->state == BT_CONNECTED)
				l2cap_create_cfm(chan, 0);
		}

		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			pi->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through - too many negotiation rounds, give up */

	default:
		sk->sk_err = ECONNRESET;
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto done;
	}

	/* Continuation flag set: more response fragments to come. */
	if (flags & 0x01)
		goto done;

	pi->conf_state |= L2CAP_CONF_INPUT_DONE;

	/* Both directions configured: channel becomes operational. */
	if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(pi);

		sk->sk_state = BT_CONNECTED;

		if (pi->mode == L2CAP_MODE_ERTM ||
			pi->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
4490
/* Handle an incoming L2CAP Disconnect Request.
 *
 * Acknowledges with a Disconnect Response, purges queued transmit data and
 * cancels the ERTM timers, then deletes the channel — unless the socket is
 * currently locked by a user-space operation, in which case teardown is
 * deferred via a short timer.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo back our CID pair, swapped into the peer's perspective. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Only do cleanup if a disconnect request was not sent already */
	if (sk->sk_state != BT_DISCONN) {
		sk->sk_shutdown = SHUTDOWN_MASK;

		skb_queue_purge(TX_QUEUE(sk));

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
			skb_queue_purge(SREJ_QUEUE(sk));

			__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
			__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
			__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
		}
	}

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);

	bh_unlock_sock(sk);

	/* Kill only after the bh lock is released. */
	l2cap_sock_kill(sk);
	return 0;
}
4542
/* Handle an incoming L2CAP Disconnect Response.
 *
 * Completes a locally initiated disconnect: deletes the channel with no
 * error, unless the socket is locked by user space, in which case teardown
 * is deferred via a short timer.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* Normal close: no error reported to the socket. */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	/* Kill only after the bh lock is released. */
	l2cap_sock_kill(sk);
	return 0;
}
4573
4574static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4575{
4576 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004577 u16 type;
4578
4579 type = __le16_to_cpu(req->type);
4580
4581 BT_DBG("type 0x%4.4x", type);
4582
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004583 if (type == L2CAP_IT_FEAT_MASK) {
4584 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004585 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004586 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4587 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4588 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004589 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004590 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004591 | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004592 put_unaligned_le32(feat_mask, rsp->data);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004593 l2cap_send_cmd(conn, cmd->ident,
4594 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004595 } else if (type == L2CAP_IT_FIXED_CHAN) {
4596 u8 buf[12];
4597 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4598 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4599 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4600 memcpy(buf + 4, l2cap_fixed_chan, 8);
4601 l2cap_send_cmd(conn, cmd->ident,
4602 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004603 } else {
4604 struct l2cap_info_rsp rsp;
4605 rsp.type = cpu_to_le16(type);
4606 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4607 l2cap_send_cmd(conn, cmd->ident,
4608 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
4609 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004610
4611 return 0;
4612}
4613
/* Handle an incoming L2CAP Information Response.
 *
 * Validates the response against the outstanding request (ident match, not
 * already completed), stops the info timer, stores the peer's feature mask
 * or fixed-channel map, possibly chains a fixed-channel query, and finally
 * kicks l2cap_conn_start() to resume pending channel setups.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	/* Peer refused: mark the exchange done and proceed without info. */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* Fixed channels supported: ask for the channel map next. */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* First octet of the map carries the supported channels. */
		conn->fc_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4667
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004668static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
4669 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
4670{
4671 struct l2cap_move_chan_req req;
4672 u8 ident;
4673
4674 BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
4675 (int) dest_amp_id);
4676
4677 ident = l2cap_get_ident(conn);
4678 if (pi)
4679 pi->ident = ident;
4680
4681 req.icid = cpu_to_le16(icid);
4682 req.dest_amp_id = dest_amp_id;
4683
4684 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
4685}
4686
4687static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4688 u16 icid, u16 result)
4689{
4690 struct l2cap_move_chan_rsp rsp;
4691
4692 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4693
4694 rsp.icid = cpu_to_le16(icid);
4695 rsp.result = cpu_to_le16(result);
4696
4697 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4698}
4699
4700static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4701 struct l2cap_pinfo *pi, u16 icid, u16 result)
4702{
4703 struct l2cap_move_chan_cfm cfm;
4704 u8 ident;
4705
4706 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4707
4708 ident = l2cap_get_ident(conn);
4709 if (pi)
4710 pi->ident = ident;
4711
4712 cfm.icid = cpu_to_le16(icid);
4713 cfm.result = cpu_to_le16(result);
4714
4715 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4716}
4717
4718static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4719 u16 icid)
4720{
4721 struct l2cap_move_chan_cfm_rsp rsp;
4722
4723 BT_DBG("icid %d", (int) icid);
4724
4725 rsp.icid = cpu_to_le16(icid);
4726 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4727}
4728
4729static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4730 struct l2cap_cmd_hdr *cmd, u8 *data)
4731{
4732 struct l2cap_create_chan_req *req =
4733 (struct l2cap_create_chan_req *) data;
4734 struct sock *sk;
4735 u16 psm, scid;
4736
4737 psm = le16_to_cpu(req->psm);
4738 scid = le16_to_cpu(req->scid);
4739
4740 BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
4741 (int) req->amp_id);
4742
4743 if (req->amp_id) {
4744 struct hci_dev *hdev;
4745
4746 /* Validate AMP controller id */
Peter Krystad4e1c9fa2011-11-10 12:28:45 -08004747 hdev = hci_dev_get(req->amp_id);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004748 if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
4749 struct l2cap_create_chan_rsp rsp;
4750
4751 rsp.dcid = 0;
4752 rsp.scid = cpu_to_le16(scid);
4753 rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
4754 rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;
4755
4756 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4757 sizeof(rsp), &rsp);
4758
4759 if (hdev)
4760 hci_dev_put(hdev);
4761
4762 return 0;
4763 }
4764
4765 hci_dev_put(hdev);
4766 }
4767
4768 sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4769 req->amp_id);
4770
Mat Martineau55f2a622011-09-19 13:20:17 -07004771 if (sk)
4772 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004773
Mat Martineau55f2a622011-09-19 13:20:17 -07004774 if (sk && req->amp_id &&
4775 (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004776 amp_accept_physical(conn, req->amp_id, sk);
4777
4778 return 0;
4779}
4780
/* Handle an incoming L2CAP Create Channel Response.
 *
 * Identical semantics to a Connect Response; delegate to l2cap_connect_rsp().
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4788
/* Handle an incoming L2CAP Move Channel Request (AMP channel move).
 *
 * Validates that the move is allowed — dynamic CID, ERTM/streaming mode,
 * different destination controller, destination controller up, no move
 * collision (resolved by BD_ADDR comparison), and local policy permits
 * leaving BR/EDR — then switches the channel into responder role and either
 * accepts immediately (move to BR/EDR) or starts physical-link setup (move
 * to AMP, answered as "pending").  A Move Channel Response is always sent,
 * defaulting to REFUSED_NOT_ALLOWED.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid = 0;
	u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);

	/* The initiator's icid is our dcid. */
	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		goto send_move_response;

	lock_sock(sk);
	pi = l2cap_pi(sk);

	/* Only dynamic ERTM/streaming channels can be moved. */
	if (pi->scid < L2CAP_CID_DYN_START ||
		(pi->mode != L2CAP_MODE_ERTM &&
		 pi->mode != L2CAP_MODE_STREAMING)) {
		goto send_move_response;
	}

	if (pi->amp_id == req->dest_amp_id) {
		result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist and be up. */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(req->dest_amp_id);
		if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
			goto send_move_response;
		}
		hci_dev_put(hdev);
	}

	/* Move collision: both sides initiating.  The device with the
	 * greater BD_ADDR rejects; the other side proceeds.
	 */
	if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
		pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
		bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
		goto send_move_response;
	}

	/* Local policy pins this channel to BR/EDR. */
	if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
		result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
		goto send_move_response;
	}

	pi->amp_move_cmd_ident = cmd->ident;
	pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
	l2cap_amp_move_setup(sk);
	pi->amp_move_id = req->dest_amp_id;
	icid = pi->dcid;

	if (req->dest_amp_id == 0) {
		/* Moving to BR/EDR */
		if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			result = L2CAP_MOVE_CHAN_PENDING;
		} else {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
			result = L2CAP_MOVE_CHAN_SUCCESS;
		}
	} else {
		/* Moving to AMP: physical link must come up first. */
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
		amp_accept_physical(pi->conn, req->dest_amp_id, sk);
		result = L2CAP_MOVE_CHAN_PENDING;
	}

send_move_response:
	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);

	if (sk)
		release_sock(sk);

	return 0;
}
4878
/* Handle an incoming L2CAP Move Channel Response (we are the move initiator).
 *
 * On success/pending, advances the AMP move state machine: a move to BR/EDR
 * confirms immediately (unless locally busy), a move to AMP first admits the
 * logical link via l2cap_chan_admit() and confirms once both the logical
 * link and the peer's success are in place.  On any failure result, the move
 * is reverted (or, on collision, the role flips to responder) and an
 * "unconfirmed" Move Channel Confirmation is sent.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid, result;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	switch (result) {
	case L2CAP_MOVE_CHAN_SUCCESS:
	case L2CAP_MOVE_CHAN_PENDING:
		/* As initiator, icid refers to our scid. */
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);
		/* Pending restarts the (extended) response timeout. */
		if (result == L2CAP_MOVE_CHAN_PENDING)
			l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);

		if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent when logical link
			 * is complete.
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
		} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
			pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			} else {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			}
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
			/* Default "best effort" flow spec for the new link. */
			struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			struct hci_chan *chan;
			/* Moving to AMP */
			if (result == L2CAP_MOVE_CHAN_SUCCESS) {
				/* Remote is ready, send confirm immediately
				 * after logical link is ready
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			} else {
				/* Both logical link and move success
				 * are required to confirm
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
			}
			pi->remote_fs = default_fs;
			pi->local_fs = default_fs;
			chan = l2cap_chan_admit(pi->amp_move_id, pi);
			if (!chan) {
				/* Logical link not available */
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
				break;
			}
			if (chan->state == BT_CONNECTED) {
				/* Logical link is already ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				if (result == L2CAP_MOVE_CHAN_SUCCESS) {
					/* Can confirm now */
					l2cap_send_move_chan_cfm(conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				} else {
					/* Now only need move success
					 * required to confirm
					 */
					pi->amp_move_state =
					    L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
				}
			} else
				chan->l2cap_sk = sk;
		} else {
			/* Any other amp move state means the move failed. */
			l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}
		break;
	default:
		/* Failed (including collision case) */
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Could not locate channel, icid is best guess */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);

		if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			/* Collision: yield and let the peer drive the move. */
			if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
				pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
			else {
				/* Cleanup - cancel move */
				pi->amp_move_id = pi->amp_id;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
				l2cap_amp_move_revert(sk);
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
			}
		}

		l2cap_send_move_chan_cfm(conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		break;
	}

	if (sk)
		release_sock(sk);

	return 0;
}
5029
/* Handle an incoming Move Channel Confirm command.
 *
 * The move initiator sends this to commit (CONFIRMED) or abandon
 * (any other result) a channel move.  A confirm response is always
 * sent back, even when the channel cannot be located.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
	struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
	struct sock *sk;
	u16 icid, result;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	/* The confirm addresses the channel by destination CID */
	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk) {
		/* Unknown channel - still acknowledge the confirm below */
		BT_DBG("Bad channel (%d)", (int) icid);
		goto send_move_confirm_response;
	}

	lock_sock(sk);

	if (l2cap_pi(sk)->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
			/* Move committed: adopt the new controller id */
			l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;
			if ((!l2cap_pi(sk)->amp_id) &&
					(l2cap_pi(sk)->ampchan)) {
				/* Have moved off of AMP, free the channel.
				 * Deaggregate only when the last reference
				 * to the hci_chan was dropped.
				 */
				if (!hci_chan_put(l2cap_pi(sk)->ampchan))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
				l2cap_pi(sk)->ampchan = NULL;
				l2cap_pi(sk)->ampcon = NULL;
			}
			l2cap_amp_move_success(sk);
		} else {
			/* Move abandoned: stay on the current controller */
			l2cap_pi(sk)->amp_move_id = l2cap_pi(sk)->amp_id;
			l2cap_amp_move_revert(sk);
		}
		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	} else if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
		/* NOTE(review): confirm arrived while still waiting for the
		 * logical link; only logged here, no state change occurs.
		 */
		BT_DBG("Bad AMP_MOVE_STATE (%d)", l2cap_pi(sk)->amp_move_state);
	}

send_move_confirm_response:
	/* A confirm response must always be sent per the move protocol */
	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	if (sk)
		release_sock(sk);

	return 0;
}
5085
/* Handle an incoming Move Channel Confirm Response.
 *
 * This is the final step of a locally-initiated channel move: the
 * channel switches to the new controller id (or tears down the AMP
 * logical link when moving back to BR/EDR) and data flow resumes.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
	struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp =
		(struct l2cap_move_chan_cfm_rsp *) data;
	struct sock *sk;
	u16 icid;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", (int) icid);

	/* The response addresses the channel by source CID */
	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		return 0;

	lock_sock(sk);

	/* Cancel the pending move timeout */
	l2cap_sock_clear_timer(sk);

	if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;

		if (!l2cap_pi(sk)->amp_id) {
			/* Have moved off of AMP, free the channel.
			 * Deaggregate only when the last hci_chan
			 * reference was dropped.
			 */
			l2cap_pi(sk)->ampcon = NULL;
			if (l2cap_pi(sk)->ampchan) {
				if (!hci_chan_put(l2cap_pi(sk)->ampchan))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
			}
			l2cap_pi(sk)->ampchan = NULL;
		}

		l2cap_amp_move_success(sk);

		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	}

	release_sock(sk);

	return 0;
}
5134
5135static void l2cap_amp_signal_worker(struct work_struct *work)
5136{
5137 int err = 0;
5138 struct l2cap_amp_signal_work *ampwork =
5139 container_of(work, struct l2cap_amp_signal_work, work);
5140
5141 switch (ampwork->cmd.code) {
5142 case L2CAP_MOVE_CHAN_REQ:
5143 err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
5144 ampwork->data);
5145 break;
5146
5147 case L2CAP_MOVE_CHAN_RSP:
5148 err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
5149 ampwork->data);
5150 break;
5151
5152 case L2CAP_MOVE_CHAN_CFM:
5153 err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
5154 ampwork->data);
5155 break;
5156
5157 case L2CAP_MOVE_CHAN_CFM_RSP:
5158 err = l2cap_move_channel_confirm_rsp(ampwork->conn,
5159 &ampwork->cmd, ampwork->data);
5160 break;
5161
5162 default:
5163 BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
5164 err = -EINVAL;
5165 break;
5166 }
5167
5168 if (err) {
5169 struct l2cap_cmd_rej rej;
5170 BT_DBG("error %d", err);
5171
5172 /* In this context, commands are only rejected with
5173 * "command not understood", code 0.
5174 */
5175 rej.reason = cpu_to_le16(0);
5176 l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
5177 L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5178 }
5179
5180 kfree_skb(ampwork->skb);
5181 kfree(ampwork);
5182}
5183
/* Called when an AMP physical link create/move attempt has completed.
 *
 * Depending on the channel state this either finishes channel creation
 * on the AMP (incoming or outgoing), continues a channel move as
 * initiator or responder, or cleans up after a failed attempt.
 * Runs with the socket locked for the duration.
 */
void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
	struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
		(int) local_id, (int) remote_id, sk);

	lock_sock(sk);

	/* Nothing to do for a channel that is going away */
	if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
		release_sock(sk);
		return;
	}

	pi = l2cap_pi(sk);

	if (sk->sk_state != BT_CONNECTED) {
		if (bt_sk(sk)->parent) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			/* Incoming channel on AMP */
			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Send successful response */
				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			} else {
				/* Send negative response */
				rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			}

			l2cap_send_cmd(pi->conn, pi->ident,
					L2CAP_CREATE_CHAN_RSP,
					sizeof(rsp), &rsp);

			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Move straight on to configuration */
				sk->sk_state = BT_CONFIG;
				pi->conf_state |= L2CAP_CONF_REQ_SENT;
				l2cap_send_cmd(pi->conn,
					l2cap_get_ident(pi->conn),
					L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
				l2cap_pi(sk)->num_conf_req++;
			}
		} else {
			/* Outgoing channel on AMP */
			if (result != L2CAP_CREATE_CHAN_SUCCESS) {
				/* Revert to BR/EDR connect */
				l2cap_send_conn_req(sk);
			} else {
				pi->amp_id = local_id;
				l2cap_send_create_chan_req(sk, remote_id);
			}
		}
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		/* Physical link ready; ask the remote to move the channel */
		l2cap_amp_move_setup(sk);
		pi->amp_move_id = local_id;
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;

		l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		struct hci_chan *chan;
		/* Default "don't care" flow spec values */
		struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
				0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
		pi->remote_fs = default_fs;
		pi->local_fs = default_fs;
		chan = l2cap_chan_admit(pi->amp_move_id, pi);
		if (chan) {
			if (chan->state == BT_CONNECTED) {
				/* Logical link is ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			} else {
				/* Wait for logical link to be ready */
				chan->l2cap_sk = sk;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			}
		} else {
			/* Logical link not available */
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}
	} else {
		/* Physical link failed (or unexpected role/result) */
		BT_DBG("result %d, role %d, local_busy %d", result,
			(int) pi->amp_move_role,
			(int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));

		if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			if (result == -EINVAL)
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
			else
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}

		pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
		pi->amp_move_state = L2CAP_AMP_STATE_STABLE;

		/* Clear local busy if receive buffer space is available */
		if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
			l2cap_ertm_tx(sk, 0, 0,
					L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

		/* Restart data transmission */
		l2cap_ertm_send(sk);
	}

	release_sock(sk);
}
5311
/* Called when setup of an AMP logical link has completed.
 *
 * On success the channel adopts the hci_chan and either finishes
 * configuration (new channel) or advances the channel-move state
 * machine.  On failure the channel falls back: disconnect during
 * setup, or revert/abort an in-progress move.
 */
int l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
{
	struct l2cap_pinfo *pi;
	struct sock *sk;

	BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);

	sk = chan->l2cap_sk;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	/* Ignore completion for a channel that never got on an AMP
	 * and is not connected.
	 */
	if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
		release_sock(sk);
		return 0;
	}

	pi = l2cap_pi(sk);

	if ((!status) && (chan != NULL)) {
		/* Adopt the logical link */
		pi->ampchan = chan;
		pi->ampcon = chan->conn;
		pi->ampcon->l2cap_data = pi->conn;

		if (sk->sk_state != BT_CONNECTED) {
			struct l2cap_conf_rsp rsp;

			/* Must use spinlock to prevent concurrent
			 * execution of l2cap_config_rsp()
			 */
			bh_lock_sock(sk);
			l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(sk, &rsp,
						L2CAP_CONF_SUCCESS, 0), &rsp);
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;

			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
				/* Both directions configured - channel up */
				set_default_fcs(l2cap_pi(sk));

				sk->sk_state = BT_CONNECTED;

				if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
					l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
					l2cap_ertm_init(sk);

				l2cap_chan_ready(sk);
			}
			bh_unlock_sock(sk);
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent after a success
			 * response is received
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
			/* Confirm (initiator) or respond (responder) now,
			 * unless receive is blocked on local busy.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_SUCCESS);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			}
		} else {
			/* Move was not in expected state, free the
			 * logical link
			 */
			hci_chan_put(pi->ampchan);
			pi->ampcon = NULL;
			pi->ampchan = NULL;
		}
	} else {
		/* Logical link setup failed. */

		if (sk->sk_state != BT_CONNECTED)
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			l2cap_amp_move_revert(sk);
			l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_CONFIG);
		} else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if ((pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
				(pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
				/* Remote has only sent pending or
				 * success responses, clean up
				 */
				l2cap_amp_move_revert(sk);
				l2cap_pi(sk)->amp_move_role =
					L2CAP_AMP_MOVE_NONE;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			}

			/* Other amp move states imply that the move
			 * has already aborted
			 */
			l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}

		pi->ampcon = NULL;
		pi->ampchan = NULL;
	}

	release_sock(sk);
	return 0;
}
5437
5438static void l2cap_logical_link_worker(struct work_struct *work)
5439{
5440 struct l2cap_logical_link_work *log_link_work =
5441 container_of(work, struct l2cap_logical_link_work, work);
5442
5443 l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
5444 kfree(log_link_work);
5445}
5446
5447static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
5448{
5449 struct l2cap_logical_link_work *amp_work;
5450
5451 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5452 if (!amp_work)
5453 return -ENOMEM;
5454
5455 INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
5456 amp_work->chan = chan;
5457 amp_work->status = status;
5458 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5459 kfree(amp_work);
5460 return -ENOMEM;
5461 }
5462
5463 return 0;
5464}
5465
5466int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
5467{
5468 struct l2cap_conn *conn = chan->conn->l2cap_data;
5469
5470 BT_DBG("chan %p conn %p status %d", chan, conn, status);
5471
5472 /* TODO: if failed status restore previous fs */
5473 return 0;
5474}
5475
5476int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
5477{
5478 struct l2cap_chan_list *l;
5479 struct l2cap_conn *conn = chan->conn->l2cap_data;
5480 struct sock *sk;
5481
5482 BT_DBG("chan %p conn %p", chan, conn);
5483
5484 if (!conn)
5485 return 0;
5486
5487 l = &conn->chan_list;
5488
5489 read_lock(&l->lock);
5490
5491 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
5492 bh_lock_sock(sk);
5493 /* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
5494 if (l2cap_pi(sk)->ampchan == chan) {
5495 l2cap_pi(sk)->ampchan = NULL;
Peter Krystad1f8a8a52011-12-01 14:18:37 -08005496 l2cap_pi(sk)->ampcon = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005497 l2cap_amp_move_init(sk);
5498 }
5499 bh_unlock_sock(sk);
5500 }
5501
5502 read_unlock(&l->lock);
5503
5504 return 0;
5505
5506
5507}
5508
5509static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
5510 u8 *data, struct sk_buff *skb)
5511{
5512 struct l2cap_amp_signal_work *amp_work;
5513
5514 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5515 if (!amp_work)
5516 return -ENOMEM;
5517
5518 INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
5519 amp_work->conn = conn;
5520 amp_work->cmd = *cmd;
5521 amp_work->data = data;
5522 amp_work->skb = skb_clone(skb, GFP_ATOMIC);
5523 if (!amp_work->skb) {
5524 kfree(amp_work);
5525 return -ENOMEM;
5526 }
5527
5528 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5529 kfree_skb(amp_work->skb);
5530 kfree(amp_work);
5531 return -ENOMEM;
5532 }
5533
5534 return 0;
5535}
5536
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005537static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
Claudio Takahaside731152011-02-11 19:28:55 -02005538 u16 to_multiplier)
5539{
5540 u16 max_latency;
5541
5542 if (min > max || min < 6 || max > 3200)
5543 return -EINVAL;
5544
5545 if (to_multiplier < 10 || to_multiplier > 3200)
5546 return -EINVAL;
5547
5548 if (max >= to_multiplier * 8)
5549 return -EINVAL;
5550
5551 max_latency = (to_multiplier * 8 / max) - 1;
5552 if (latency > 499 || latency > max_latency)
5553 return -EINVAL;
5554
5555 return 0;
5556}
5557
5558static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5559 struct l2cap_cmd_hdr *cmd, u8 *data)
5560{
5561 struct hci_conn *hcon = conn->hcon;
5562 struct l2cap_conn_param_update_req *req;
5563 struct l2cap_conn_param_update_rsp rsp;
5564 u16 min, max, latency, to_multiplier, cmd_len;
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005565 int err;
Claudio Takahaside731152011-02-11 19:28:55 -02005566
5567 if (!(hcon->link_mode & HCI_LM_MASTER))
5568 return -EINVAL;
5569
5570 cmd_len = __le16_to_cpu(cmd->len);
5571 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5572 return -EPROTO;
5573
5574 req = (struct l2cap_conn_param_update_req *) data;
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005575 min = __le16_to_cpu(req->min);
5576 max = __le16_to_cpu(req->max);
Claudio Takahaside731152011-02-11 19:28:55 -02005577 latency = __le16_to_cpu(req->latency);
5578 to_multiplier = __le16_to_cpu(req->to_multiplier);
5579
5580 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5581 min, max, latency, to_multiplier);
5582
5583 memset(&rsp, 0, sizeof(rsp));
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005584
5585 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5586 if (err)
Claudio Takahaside731152011-02-11 19:28:55 -02005587 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5588 else
5589 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5590
5591 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5592 sizeof(rsp), &rsp);
5593
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005594 if (!err)
5595 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5596
Claudio Takahaside731152011-02-11 19:28:55 -02005597 return 0;
5598}
5599
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005600static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005601 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
5602 struct sk_buff *skb)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005603{
5604 int err = 0;
5605
5606 switch (cmd->code) {
5607 case L2CAP_COMMAND_REJ:
5608 l2cap_command_rej(conn, cmd, data);
5609 break;
5610
5611 case L2CAP_CONN_REQ:
5612 err = l2cap_connect_req(conn, cmd, data);
5613 break;
5614
5615 case L2CAP_CONN_RSP:
5616 err = l2cap_connect_rsp(conn, cmd, data);
5617 break;
5618
5619 case L2CAP_CONF_REQ:
5620 err = l2cap_config_req(conn, cmd, cmd_len, data);
5621 break;
5622
5623 case L2CAP_CONF_RSP:
5624 err = l2cap_config_rsp(conn, cmd, data);
5625 break;
5626
5627 case L2CAP_DISCONN_REQ:
5628 err = l2cap_disconnect_req(conn, cmd, data);
5629 break;
5630
5631 case L2CAP_DISCONN_RSP:
5632 err = l2cap_disconnect_rsp(conn, cmd, data);
5633 break;
5634
5635 case L2CAP_ECHO_REQ:
5636 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5637 break;
5638
5639 case L2CAP_ECHO_RSP:
5640 break;
5641
5642 case L2CAP_INFO_REQ:
5643 err = l2cap_information_req(conn, cmd, data);
5644 break;
5645
5646 case L2CAP_INFO_RSP:
5647 err = l2cap_information_rsp(conn, cmd, data);
5648 break;
5649
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005650 case L2CAP_CREATE_CHAN_REQ:
5651 err = l2cap_create_channel_req(conn, cmd, data);
5652 break;
5653
5654 case L2CAP_CREATE_CHAN_RSP:
5655 err = l2cap_create_channel_rsp(conn, cmd, data);
5656 break;
5657
5658 case L2CAP_MOVE_CHAN_REQ:
5659 case L2CAP_MOVE_CHAN_RSP:
5660 case L2CAP_MOVE_CHAN_CFM:
5661 case L2CAP_MOVE_CHAN_CFM_RSP:
5662 err = l2cap_sig_amp(conn, cmd, data, skb);
5663 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005664 default:
5665 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5666 err = -EINVAL;
5667 break;
5668 }
5669
5670 return err;
5671}
5672
5673static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5674 struct l2cap_cmd_hdr *cmd, u8 *data)
5675{
5676 switch (cmd->code) {
5677 case L2CAP_COMMAND_REJ:
5678 return 0;
5679
5680 case L2CAP_CONN_PARAM_UPDATE_REQ:
Claudio Takahaside731152011-02-11 19:28:55 -02005681 return l2cap_conn_param_update_req(conn, cmd, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005682
5683 case L2CAP_CONN_PARAM_UPDATE_RSP:
5684 return 0;
5685
5686 default:
5687 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5688 return -EINVAL;
5689 }
5690}
5691
/* Process all signaling commands carried in one signaling-channel skb.
 *
 * Commands are parsed sequentially; each is dispatched to the LE or
 * BR/EDR handler depending on link type.  A handler error causes a
 * Command Reject to be sent for that command.  The skb is consumed.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	/* Walk the packet one command (header + payload) at a time */
	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A length beyond the packet or a zero ident is invalid;
		 * stop parsing the remainder.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
							data, skb);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5739
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005740static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005741{
5742 u16 our_fcs, rcv_fcs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005743 int hdr_size;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005744
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005745 if (pi->extended_control)
5746 hdr_size = L2CAP_EXTENDED_HDR_SIZE;
5747 else
5748 hdr_size = L2CAP_ENHANCED_HDR_SIZE;
5749
5750 if (pi->fcs == L2CAP_FCS_CRC16) {
5751 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005752 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5753 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5754
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005755 if (our_fcs != rcv_fcs) {
5756 BT_DBG("Bad FCS");
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005757 return -EBADMSG;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005758 }
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005759 }
5760 return 0;
5761}
5762
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005763static void l2cap_ertm_pass_to_tx(struct sock *sk,
5764 struct bt_l2cap_control *control)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005765{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005766 BT_DBG("sk %p, control %p", sk, control);
5767 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005768}
5769
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005770static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
5771 struct bt_l2cap_control *control)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005772{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005773 BT_DBG("sk %p, control %p", sk, control);
5774 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
5775}
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005776
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005777static void l2cap_ertm_resend(struct sock *sk)
5778{
5779 struct bt_l2cap_control control;
5780 struct l2cap_pinfo *pi;
5781 struct sk_buff *skb;
5782 struct sk_buff *tx_skb;
5783 u16 seq;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005784
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005785 BT_DBG("sk %p", sk);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005786
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005787 pi = l2cap_pi(sk);
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005788
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005789 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
5790 return;
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03005791
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005792 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
5793 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
5794 return;
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005795
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005796 while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
5797 seq = l2cap_seq_list_pop(&pi->retrans_list);
5798
5799 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
5800 if (!skb) {
5801 BT_DBG("Error: Can't retransmit seq %d, frame missing",
5802 (int) seq);
5803 continue;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005804 }
5805
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005806 bt_cb(skb)->retries += 1;
5807 control = bt_cb(skb)->control;
5808
5809 if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
5810 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
5811 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
5812 l2cap_seq_list_clear(&pi->retrans_list);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005813 break;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005814 }
5815
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005816 control.reqseq = pi->buffer_seq;
5817 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
5818 control.final = 1;
5819 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
5820 } else {
5821 control.final = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005822 }
5823
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005824 if (skb_cloned(skb)) {
5825 /* Cloned sk_buffs are read-only, so we need a
5826 * writeable copy
5827 */
5828 tx_skb = skb_copy(skb, GFP_ATOMIC);
5829 } else {
5830 tx_skb = skb_clone(skb, GFP_ATOMIC);
5831 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005832
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005833 /* Update skb contents */
5834 if (pi->extended_control) {
5835 put_unaligned_le32(__pack_extended_control(&control),
5836 tx_skb->data + L2CAP_HDR_SIZE);
5837 } else {
5838 put_unaligned_le16(__pack_enhanced_control(&control),
5839 tx_skb->data + L2CAP_HDR_SIZE);
5840 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005841
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005842 if (pi->fcs == L2CAP_FCS_CRC16)
5843 apply_fcs(tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005844
Mat Martineau2f0cd842011-10-20 14:34:26 -07005845 sock_hold(sk);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005846 tx_skb->sk = sk;
5847 tx_skb->destructor = l2cap_skb_destructor;
5848 atomic_inc(&pi->ertm_queued);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005849
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005850 l2cap_do_send(sk, tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005851
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005852 BT_DBG("Resent txseq %d", (int)control.txseq);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005853
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005854 pi->last_acked_seq = pi->buffer_seq;
Mat Martineauc0675b82011-07-07 09:39:02 -07005855 }
5856}
5857
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005858static inline void l2cap_ertm_retransmit(struct sock *sk,
5859 struct bt_l2cap_control *control)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03005860{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005861 BT_DBG("sk %p, control %p", sk, control);
5862
5863 l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
5864 l2cap_ertm_resend(sk);
5865}
5866
/* Retransmit all unacknowledged I-frames starting at control->reqseq.
 *
 * The retransmission list is rebuilt from the transmit queue: the
 * first walk locates the frame with txseq == reqseq, the second walk
 * continues from that frame, queuing every txseq up to (but not
 * including) sk_send_head, then triggers a resend.
 */
static void l2cap_ertm_retransmit_all(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	/* A poll requires the F-bit to be set on the next response */
	if (control->poll)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	l2cap_seq_list_clear(&pi->retrans_list);

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return;

	if (pi->unacked_frames) {
		/* Find the starting frame; 'skb' carries over into the
		 * second walk below.
		 */
		skb_queue_walk(TX_QUEUE(sk), skb) {
			if ((bt_cb(skb)->control.txseq == control->reqseq) ||
					skb == sk->sk_send_head)
				break;
		}

		skb_queue_walk_from(TX_QUEUE(sk), skb) {
			if (skb == sk->sk_send_head)
				break;

			l2cap_seq_list_append(&pi->retrans_list,
						bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(sk);
	}
}
5903
5904static inline void append_skb_frag(struct sk_buff *skb,
5905 struct sk_buff *new_frag, struct sk_buff **last_frag)
5906{
5907 /* skb->len reflects data in skb as well as all fragments
5908 skb->data_len reflects only data in fragments
5909 */
5910 BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);
5911
5912 if (!skb_has_frag_list(skb))
5913 skb_shinfo(skb)->frag_list = new_frag;
5914
5915 new_frag->next = NULL;
5916
5917 (*last_frag)->next = new_frag;
5918 *last_frag = new_frag;
5919
5920 skb->len += new_frag->len;
5921 skb->data_len += new_frag->len;
5922 skb->truesize += new_frag->truesize;
5923}
5924
/* Reassemble an in-sequence I-frame and deliver completed SDUs.
 *
 * Dispatches on the frame's SAR (segmentation and reassembly) bits:
 * unsegmented SDUs are queued to the socket directly; start/continue/
 * end fragments are accumulated in pi->sdu until the full SDU (of
 * pi->sdu_len bytes) can be queued.  Ownership of @skb transfers to
 * this function: it is either queued, stored for reassembly, or freed
 * on the error path.
 *
 * Returns 0 on success, -EINVAL on protocol violations (unexpected SAR
 * sequence), -EMSGSIZE if the announced SDU exceeds the local MTU, or
 * the sock_queue_rcv_skb() error.  On any error the partial SDU and
 * the frame are discarded.
 */
static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
			struct bt_l2cap_control *control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = -EINVAL;

	BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
		skb, skb->len, skb->truesize);

	if (!control)
		return err;

	pi = l2cap_pi(sk);

	BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
		control->frame_type, control->sar, control->txseq,
		control->reqseq, control->final);

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A complete SDU in one frame; abandon any reassembly
		 * that was in progress.
		 */
		if (pi->sdu) {
			BT_DBG("Unexpected unsegmented PDU during reassembly");
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}

		BT_DBG("Unsegmented");
		err = sock_queue_rcv_skb(sk, skb);
		break;

	case L2CAP_SAR_START:
		if (pi->sdu) {
			BT_DBG("Unexpected start PDU during reassembly");
			kfree_skb(pi->sdu);
		}

		/* Start fragments carry the total SDU length in the
		 * first two octets of the payload.
		 */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be shorter than the whole SDU;
		 * otherwise err stays -EINVAL and the frame is dropped.
		 */
		if (skb->len >= pi->sdu_len)
			break;

		pi->sdu = skb;
		pi->sdu_last_frag = skb;

		BT_DBG("Start");

		/* skb is now owned by the reassembly state */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* A continue fragment may not complete (or overrun)
		 * the SDU; err stays -EINVAL in that case.
		 */
		if (pi->sdu->len >= pi->sdu_len)
			break;

		BT_DBG("Continue, reassembled %d", pi->sdu->len);

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (pi->sdu->len != pi->sdu_len)
			break;

		BT_DBG("End, reassembled %d", pi->sdu->len);
		/* If the sender used tiny PDUs, the rcv queuing could fail.
		 * Applications that have issues here should use a larger
		 * sk_rcvbuf.
		 */
		err = sock_queue_rcv_skb(sk, pi->sdu);

		if (!err) {
			/* Reassembly complete */
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}
		break;

	default:
		BT_DBG("Bad SAR value");
		break;
	}

	if (err) {
		/* Drop both the partial SDU and the frame on error */
		BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
			err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;
		if (skb)
			kfree_skb(skb);
	}

	/* Update local busy state */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);

	return err;
}
6049
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006050static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03006051{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006052 int err = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006053 /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
6054 * until a gap is encountered.
6055 */
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006056
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006057 struct l2cap_pinfo *pi;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006058
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006059 BT_DBG("sk %p", sk);
6060 pi = l2cap_pi(sk);
6061
6062 while (l2cap_rmem_available(sk)) {
6063 struct sk_buff *skb;
6064 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6065 (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
6066
6067 skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
6068
6069 if (!skb)
6070 break;
6071
6072 skb_unlink(skb, SREJ_QUEUE(sk));
6073 pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
6074 err = l2cap_ertm_rx_expected_iframe(sk,
6075 &bt_cb(skb)->control, skb);
6076 if (err)
6077 break;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006078 }
6079
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006080 if (skb_queue_empty(SREJ_QUEUE(sk))) {
6081 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6082 l2cap_ertm_send_ack(sk);
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03006083 }
6084
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006085 return err;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006086}
6087
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006088static void l2cap_ertm_handle_srej(struct sock *sk,
6089 struct bt_l2cap_control *control)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006090{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006091 struct l2cap_pinfo *pi;
6092 struct sk_buff *skb;
Gustavo F. Padovan0e989582010-04-19 14:45:38 -03006093
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006094 BT_DBG("sk %p, control %p", sk, control);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006095
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006096 pi = l2cap_pi(sk);
Gustavo F. Padovan05fbd892010-05-01 16:15:39 -03006097
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006098 if (control->reqseq == pi->next_tx_seq) {
6099 BT_DBG("Invalid reqseq %d, disconnecting",
6100 (int) control->reqseq);
6101 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03006102 return;
Gustavo F. Padovane0727452010-05-01 16:15:38 -03006103 }
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03006104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006105 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
Gustavo F. Padovane0727452010-05-01 16:15:38 -03006106
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006107 if (skb == NULL) {
6108 BT_DBG("Seq %d not available for retransmission",
6109 (int) control->reqseq);
6110 return;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006111 }
6112
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006113 if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
6114 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
6115 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6116 return;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006117 }
6118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006119 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006120
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006121 if (control->poll) {
6122 l2cap_ertm_pass_to_tx(sk, control);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006123
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006124 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
6125 l2cap_ertm_retransmit(sk, control);
6126 l2cap_ertm_send(sk);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006127
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006128 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
6129 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
6130 pi->srej_save_reqseq = control->reqseq;
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006131 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006132 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006133 l2cap_ertm_pass_to_tx_fbit(sk, control);
6134
6135 if (control->final) {
6136 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
6137 (pi->srej_save_reqseq == control->reqseq)) {
6138 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
6139 } else {
6140 l2cap_ertm_retransmit(sk, control);
6141 }
6142 } else {
6143 l2cap_ertm_retransmit(sk, control);
6144 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
6145 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
6146 pi->srej_save_reqseq = control->reqseq;
6147 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006148 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006149 }
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03006150}
6151
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006152static void l2cap_ertm_handle_rej(struct sock *sk,
6153 struct bt_l2cap_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006154{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006155 struct l2cap_pinfo *pi;
6156 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006157
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006158 BT_DBG("sk %p, control %p", sk, control);
6159
6160 pi = l2cap_pi(sk);
6161
6162 if (control->reqseq == pi->next_tx_seq) {
6163 BT_DBG("Invalid reqseq %d, disconnecting",
6164 (int) control->reqseq);
6165 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6166 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006167 }
6168
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006169 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006170
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006171 if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
6172 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
6173 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6174 return;
6175 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006176
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006177 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
6178
6179 l2cap_ertm_pass_to_tx(sk, control);
6180
6181 if (control->final) {
6182 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
6183 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
6184 else
6185 l2cap_ertm_retransmit_all(sk, control);
6186 } else {
6187 l2cap_ertm_retransmit_all(sk, control);
6188 l2cap_ertm_send(sk);
6189 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
6190 pi->conn_state |= L2CAP_CONN_REJ_ACT;
6191 }
6192}
6193
/* Classify a received I-frame's txseq relative to the receive window.
 *
 * Returns one of the L2CAP_ERTM_TXSEQ_* values: EXPECTED (in
 * sequence), DUPLICATE (already received), UNEXPECTED (sequence gap —
 * SREJ recovery needed), INVALID / INVALID_IGNORE (outside the tx
 * window), or the *_SREJ variants used while SREJ recovery is in
 * progress.  All comparisons use modular sequence arithmetic via
 * __delta_seq() anchored at last_acked_seq.
 */
static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);
	pi = l2cap_pi(sk);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
		(int)pi->expected_tx_seq);

	/* Extra classifications apply while SREJ recovery is active */
	if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - txseq outside "
					"tx window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - bad txseq within tx "
					"window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID;
			}
		}

		if (pi->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - txseq not requested "
				"with SREJ");
			return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (pi->expected_tx_seq == txseq) {
		/* In-sequence frame — but still reject it if it falls
		 * outside the negotiated tx window.
		 */
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_ERTM_TXSEQ_EXPECTED;
		}
	}

	/* txseq earlier than expected_tx_seq means a retransmission of
	 * a frame we already have.
	 */
	if (__delta_seq(txseq, pi->last_acked_seq, pi) <
		__delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_ERTM_TXSEQ_DUPLICATE;
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_ERTM_TXSEQ_UNEXPECTED;
	}
}
6283
/* ERTM receive state machine: normal-operation (RECV) state.
 *
 * Dispatches one receive event: an I-frame (classified against the
 * receive window) or an S-frame (RR/RNR/REJ/SREJ).  In-sequence
 * I-frames are delivered to reassembly; a sequence gap starts SREJ
 * recovery and moves the channel to the SREJ_SENT state.  @skb is
 * consumed: either queued/delivered (skb_in_use) or freed at the end.
 *
 * Returns 0 or a negative error from frame delivery.
 */
static int l2cap_ertm_rx_state_recv(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Receive buffer full: drop; the remote will
			 * retransmit once local busy clears.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			/* F-bit answers our earlier poll; retransmit
			 * unless a REJ already triggered it.
			 */
			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else {
					control->final = 0;
					l2cap_ertm_retransmit_all(sk, control);
					l2cap_ertm_send(sk);
				}
			}

			if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
				l2cap_ertm_send_ack(sk);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding unexpected seq %d",
					control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			l2cap_seq_list_clear(&pi->srej_list);
			l2cap_ertm_send_srej(sk, control->txseq);

			pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
						ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			/* Retransmit on F-bit unless a REJ already did,
			 * and only when no AMP move is in flight.
			 */
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
					pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_PREPARE) {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			l2cap_ertm_send_i_or_rr_or_rnr(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames)
				l2cap_ertm_start_retrans_timer(pi);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control && control->poll) {
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_rr_or_rnr(sk, 0);
		}
		/* Remote is busy: stop retransmitting until it clears */
		l2cap_ertm_stop_retrans_timer(pi);
		l2cap_seq_list_clear(&pi->retrans_list);
		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6421
/* ERTM receive state machine: SREJ_SENT (selective-reject recovery).
 *
 * While missing frames are outstanding, arriving I-frames are buffered
 * on the SREJ queue rather than delivered, so SDUs can be reassembled
 * in order once the gaps are filled (l2cap_ertm_rx_queued_iframes()
 * also returns the channel to RECV when the queue drains).  S-frames
 * are handled much like in the RECV state, except polls are answered
 * with the tail of the SREJ list.  @skb is consumed: either queued
 * (skb_in_use) or freed at the end.
 *
 * Returns 0 or a negative error from frame delivery.
 */
static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->expected_tx_seq = __next_seq(txseq, pi);
			break;
		case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
			/* Head of the SREJ list arrived: queue it and
			 * try to deliver everything now sequential.
			 */
			l2cap_seq_list_pop(&pi->srej_list);

			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			err = l2cap_ertm_rx_queued_iframes(sk);
			if (err)
				break;

			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej_list(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
						ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			/* Retransmit on F-bit unless a REJ already did */
			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			/* Answer the poll with the last outstanding SREJ */
			l2cap_ertm_send_srej_tail(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send_ack(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->poll)
			l2cap_ertm_send_srej_tail(sk);
		else {
			/* Acknowledge the RNR with a plain RR */
			struct bt_l2cap_control rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.frame_type = 's';
			rr_control.super = L2CAP_SFRAME_RR;
			rr_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &rr_control);
		}

		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6567
/* ERTM receive state machine: AMP_MOVE (channel move in progress).
 *
 * Receive processing is minimized while the channel is being moved to
 * another controller: only in-sequence I-frames are delivered, ack
 * information from S-frames is consumed, and everything else is
 * dropped so no recovery state changes can occur mid-move.
 *
 * Returns 0 or a negative error from frame delivery.
 */
static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	/* Only handle expected frames, to avoid state changes. */

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
				L2CAP_ERTM_TXSEQ_EXPECTED) {
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			/* Note the F-bit but defer any retransmission */
			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else
					control->final = 0;
			}
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
	case L2CAP_ERTM_EVENT_RECV_RNR:
	case L2CAP_ERTM_EVENT_RECV_REJ:
		/* Only consume the acknowledgement information */
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		/* Ignore */
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6629
/* Respond to the remote's poll after an AMP channel move.
 *
 * Acks everything up to amp_move_reqseq, rewinds the transmit side so
 * unacked frames are resent on the new channel, finishes the move
 * (resegmentation etc.), answers the poll with the F-bit, and finally
 * replays the event that carried the poll through the RECV-state
 * machine.
 *
 * Returns 0 on success or a negative error (-EPROTO if the poll
 * arrived on an I-frame, which is not allowed here).
 */
static int l2cap_answer_move_poll(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;
	int err = 0;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));
	else
		sk->sk_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	pi->next_tx_seq = pi->amp_move_reqseq;
	pi->unacked_frames = 0;

	err = l2cap_finish_amp_move(sk);

	if (err)
		return err;

	pi->conn_state |= L2CAP_CONN_SEND_FBIT;
	l2cap_ertm_send_i_or_rr_or_rnr(sk);

	/* Replay the saved poll event with a synthetic control block */
	memset(&control, 0, sizeof(control));
	control.reqseq = pi->amp_move_reqseq;

	if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
		err = -EPROTO;
	else
		err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
						pi->amp_move_event);

	return err;
}
6672
6673static void l2cap_amp_move_setup(struct sock *sk)
6674{
6675 struct l2cap_pinfo *pi;
6676 struct sk_buff *skb;
6677
6678 BT_DBG("sk %p", sk);
6679
6680 pi = l2cap_pi(sk);
6681
6682 l2cap_ertm_stop_ack_timer(pi);
6683 l2cap_ertm_stop_retrans_timer(pi);
6684 l2cap_ertm_stop_monitor_timer(pi);
6685
6686 pi->retry_count = 0;
6687 skb_queue_walk(TX_QUEUE(sk), skb) {
6688 if (bt_cb(skb)->retries)
6689 bt_cb(skb)->retries = 1;
6690 else
6691 break;
6692 }
6693
6694 pi->expected_tx_seq = pi->buffer_seq;
6695
6696 pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
6697 l2cap_seq_list_clear(&pi->retrans_list);
6698 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
6699 skb_queue_purge(SREJ_QUEUE(sk));
6700
6701 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
6702 pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
6703
6704 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
6705 pi->rx_state);
6706
6707 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
6708}
6709
6710static void l2cap_amp_move_revert(struct sock *sk)
6711{
6712 struct l2cap_pinfo *pi;
6713
6714 BT_DBG("sk %p", sk);
6715
6716 pi = l2cap_pi(sk);
6717
6718 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6719 l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6720 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6721 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
6722 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
6723}
6724
6725static int l2cap_amp_move_reconf(struct sock *sk)
6726{
6727 struct l2cap_pinfo *pi;
6728 u8 buf[64];
6729 int err = 0;
6730
6731 BT_DBG("sk %p", sk);
6732
6733 pi = l2cap_pi(sk);
6734
6735 l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
6736 l2cap_build_amp_reconf_req(sk, buf), buf);
6737 return err;
6738}
6739
6740static void l2cap_amp_move_success(struct sock *sk)
6741{
6742 struct l2cap_pinfo *pi;
6743
6744 BT_DBG("sk %p", sk);
6745
6746 pi = l2cap_pi(sk);
6747
6748 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6749 int err = 0;
6750 /* Send reconfigure request */
6751 if (pi->mode == L2CAP_MODE_ERTM) {
6752 pi->reconf_state = L2CAP_RECONF_INT;
6753 if (enable_reconfig)
6754 err = l2cap_amp_move_reconf(sk);
6755
6756 if (err || !enable_reconfig) {
6757 pi->reconf_state = L2CAP_RECONF_NONE;
6758 l2cap_ertm_tx(sk, NULL, NULL,
6759 L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6760 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6761 }
6762 } else
6763 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6764 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
6765 if (pi->mode == L2CAP_MODE_ERTM)
6766 pi->rx_state =
6767 L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
6768 else
6769 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6770 }
6771}
6772
6773static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
6774{
6775 /* Make sure reqseq is for a packet that has been sent but not acked */
6776 u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
6777 return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
6778}
6779
6780static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
6781 struct sk_buff *skb)
6782{
6783 struct l2cap_pinfo *pi;
6784 int err = 0;
6785
6786 BT_DBG("sk %p, control %p, skb %p, state %d",
6787 sk, control, skb, l2cap_pi(sk)->rx_state);
6788
6789 pi = l2cap_pi(sk);
6790
6791 if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
6792 L2CAP_ERTM_TXSEQ_EXPECTED) {
6793 l2cap_ertm_pass_to_tx(sk, control);
6794
6795 BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
6796 __next_seq(pi->buffer_seq, pi));
6797
6798 pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
6799
6800 l2cap_ertm_rx_expected_iframe(sk, control, skb);
6801 } else {
6802 if (pi->sdu) {
6803 kfree_skb(pi->sdu);
6804 pi->sdu = NULL;
6805 }
6806 pi->sdu_last_frag = NULL;
6807 pi->sdu_len = 0;
6808
6809 if (skb) {
6810 BT_DBG("Freeing %p", skb);
6811 kfree_skb(skb);
6812 }
6813 }
6814
6815 pi->last_acked_seq = control->txseq;
6816 pi->expected_tx_seq = __next_seq(control->txseq, pi);
6817
6818 return err;
6819}
6820
/* Top-level ERTM receive dispatcher.
 *
 * Validates the frame's reqseq against the in-flight window (an
 * invalid ack number disconnects the channel), then routes the event
 * to the handler for the current rx_state.  The WAIT_F_FLAG /
 * WAIT_P_FLAG states implement the poll/final handshake that resumes
 * data flow after an AMP channel move.
 *
 * Returns 0 or a negative error from the state handler.
 */
static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
		sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (__valid_reqseq(pi, control->reqseq)) {
		switch (pi->rx_state) {
		case L2CAP_ERTM_RX_STATE_RECV:
			err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
			break;
		case L2CAP_ERTM_RX_STATE_SREJ_SENT:
			err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_AMP_MOVE:
			err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
			/* Waiting for the F-bit answer to our poll
			 * after an AMP move: resume normal operation,
			 * rewind tx to the remote's reqseq, and adapt
			 * segmentation to the new link's MTU.
			 */
			if (control->final) {
				pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;

				pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
				l2cap_ertm_process_reqseq(sk, control->reqseq);

				if (!skb_queue_empty(TX_QUEUE(sk)))
					sk->sk_send_head =
						skb_peek(TX_QUEUE(sk));
				else
					sk->sk_send_head = NULL;

				/* Rewind next_tx_seq to the point expected
				 * by the receiver.
				 */
				pi->next_tx_seq = control->reqseq;
				pi->unacked_frames = 0;

				if (pi->ampcon)
					pi->conn->mtu =
						pi->ampcon->hdev->acl_mtu;
				else
					pi->conn->mtu =
						pi->conn->hcon->hdev->acl_mtu;

				err = l2cap_setup_resegment(sk);

				if (err)
					break;

				err = l2cap_ertm_rx_state_recv(sk, control, skb,
							event);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
			/* Responder side: wait for the remote's poll */
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;
				err = l2cap_answer_move_poll(sk);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;

				BT_DBG("amp_move_role 0x%2.2x, "
					"reconf_state 0x%2.2x",
					pi->amp_move_role, pi->reconf_state);

				if (pi->reconf_state == L2CAP_RECONF_ACC)
					err = l2cap_amp_move_reconf(sk);
				else
					err = l2cap_answer_move_poll(sk);
			}
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
			control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	}

	return err;
}
6915
6916void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
6917{
6918 lock_sock(sk);
6919
6920 l2cap_pi(sk)->fixed_channel = 1;
6921
6922 l2cap_pi(sk)->imtu = opt->imtu;
6923 l2cap_pi(sk)->omtu = opt->omtu;
6924 l2cap_pi(sk)->remote_mps = opt->omtu;
6925 l2cap_pi(sk)->mps = opt->omtu;
6926 l2cap_pi(sk)->flush_to = opt->flush_to;
6927 l2cap_pi(sk)->mode = opt->mode;
6928 l2cap_pi(sk)->fcs = opt->fcs;
6929 l2cap_pi(sk)->max_tx = opt->max_tx;
6930 l2cap_pi(sk)->remote_max_tx = opt->max_tx;
6931 l2cap_pi(sk)->tx_win = opt->txwin_size;
6932 l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
6933 l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
6934 l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
6935
6936 if (opt->mode == L2CAP_MODE_ERTM ||
6937 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
6938 l2cap_ertm_init(sk);
6939
6940 release_sock(sk);
6941
6942 return;
6943}
6944
/* Maps an S-frame supervisory function index to the receive event fed
 * into the ERTM state machine, in the order RR, REJ, RNR, SREJ
 * (presumably the 2-bit S-frame function field encoding — confirm
 * against the control-field parsing in the caller).
 */
static const u8 l2cap_ertm_rx_func_to_event[4] = {
	L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
	L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
};
6949
/* Receive path for a connection-oriented L2CAP channel.
 *
 * @sk:  channel socket (caller holds the appropriate socket lock /
 *       bh_lock — NOTE(review): locking is done by callers such as
 *       l2cap_recv_frame() and l2cap_recv_deferred_frame(); confirm).
 * @skb: complete L2CAP SDU/PDU payload, header already stripped.
 *
 * Basic mode: queue directly to the socket.  ERTM/streaming mode: parse the
 * (enhanced or extended) control field, verify FCS and size, then hand
 * I-frames/S-frames to the ERTM or streaming state machines.
 *
 * Ownership: on the ERTM/streaming path the skb is consumed by
 * l2cap_ertm_rx()/l2cap_strm_rx() (goto done, no free here); the drop label
 * frees it.  Always returns 0.
 */
int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control *control;
	u16 len;
	u8 event;
	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Parse the control field: 4 bytes when extended control is
		 * negotiated, otherwise the 2-byte enhanced form. */
		control = &bt_cb(skb)->control;
		if (pi->extended_control) {
			__get_extended_control(get_unaligned_le32(skb->data),
					control);
			skb_pull(skb, 4);
		} else {
			__get_enhanced_control(get_unaligned_le16(skb->data),
					control);
			skb_pull(skb, 2);
		}

		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Compute the payload length to validate against MPS:
		 * SAR-start I-frames carry a 2-byte SDU length prefix, and
		 * a CRC16 FCS adds 2 trailing bytes. */
		if ((control->frame_type == 'i') &&
				(control->sar == L2CAP_SAR_START))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask for retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (control->frame_type == 'i') {

			int err;

			BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
				control->sar, control->reqseq, control->final,
				control->txseq);

			/* Validate F-bit - F=0 always valid, F=1 only
			 * valid in TX WAIT_F
			 */
			if (control->final && (pi->tx_state !=
					L2CAP_ERTM_TX_STATE_WAIT_F))
				goto drop;

			/* ERTM and streaming share this path; only ERTM runs
			 * the full RX state machine. */
			if (pi->mode != L2CAP_MODE_STREAMING) {
				event = L2CAP_ERTM_EVENT_RECV_IFRAME;
				err = l2cap_ertm_rx(sk, control, skb, event);
			} else
				err = l2cap_strm_rx(sk, control, skb);
			if (err)
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		} else {
			/* Only I-frames are expected in streaming mode */
			if (pi->mode == L2CAP_MODE_STREAMING)
				goto drop;

			BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
				control->reqseq, control->final, control->poll,
				control->super);

			/* S-frames carry no payload beyond control + FCS */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
				goto drop;
			}

			/* Validate F and P bits */
			if (control->final &&
				((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
					|| control->poll))
				goto drop;

			event = l2cap_ertm_rx_func_to_event[control->super];
			if (l2cap_ertm_rx(sk, control, skb, event))
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		}

		/* skb was consumed by the RX state machine — skip the free */
		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	return 0;
}
7075
/* Deliver a deferred L2CAP frame from process context.
 * Takes the socket lock (may sleep) and runs the frame through the normal
 * data-channel receive path. */
void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	l2cap_data_channel(sk, skb);
	release_sock(sk);
}
7082
Al Viro8e036fc2007-07-29 00:16:36 -07007083static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007084{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007085 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007086
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007087 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
7088 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007089 goto drop;
7090
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00007091 bh_lock_sock(sk);
7092
Linus Torvalds1da177e2005-04-16 15:20:36 -07007093 BT_DBG("sk %p, len %d", sk, skb->len);
7094
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007095 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007096 goto drop;
7097
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007098 if (l2cap_pi(sk)->imtu < skb->len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007099 goto drop;
7100
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007101 if (!sock_queue_rcv_skb(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007102 goto done;
7103
7104drop:
7105 kfree_skb(skb);
7106
7107done:
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03007108 if (sk)
7109 bh_unlock_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007110 return 0;
7111}
7112
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007113static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
7114{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007115 struct sock *sk;
Brian Gix7eaa64d2011-10-19 13:17:42 -07007116 struct sk_buff *skb_rsp;
7117 struct l2cap_hdr *lh;
7118 u8 err_rsp[] = {L2CAP_ATT_ERROR, 0x00, 0x00, 0x00,
7119 L2CAP_ATT_NOT_SUPPORTED};
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007120
Inga Stotlandf214b6e2011-10-11 08:56:15 -07007121 sk = l2cap_get_sock_by_fixed_scid(0, cid, conn->src, conn->dst);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007122 if (!sk)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007123 goto drop;
7124
7125 bh_lock_sock(sk);
7126
7127 BT_DBG("sk %p, len %d", sk, skb->len);
7128
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007129 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007130 goto drop;
7131
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007132 if (l2cap_pi(sk)->imtu < skb->len)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007133 goto drop;
7134
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007135 if (!sock_queue_rcv_skb(sk, skb))
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007136 goto done;
7137
7138drop:
Brian Gix7eaa64d2011-10-19 13:17:42 -07007139 if (skb->data[0] & L2CAP_ATT_RESPONSE_BIT &&
7140 skb->data[0] != L2CAP_ATT_INDICATE)
7141 goto free_skb;
7142
7143 /* If this is an incoming PDU that requires a response, respond with
7144 * a generic error so remote device doesn't hang */
7145
7146 skb_rsp = bt_skb_alloc(sizeof(err_rsp) + L2CAP_HDR_SIZE, GFP_ATOMIC);
7147 if (!skb_rsp)
7148 goto free_skb;
7149
7150 lh = (struct l2cap_hdr *) skb_put(skb_rsp, L2CAP_HDR_SIZE);
7151 lh->len = cpu_to_le16(sizeof(err_rsp));
7152 lh->cid = cpu_to_le16(L2CAP_CID_LE_DATA);
7153 err_rsp[1] = skb->data[0];
7154 memcpy(skb_put(skb_rsp, sizeof(err_rsp)), err_rsp, sizeof(err_rsp));
7155 hci_send_acl(conn->hcon, NULL, skb_rsp, 0);
7156
7157free_skb:
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007158 kfree_skb(skb);
7159
7160done:
7161 if (sk)
7162 bh_unlock_sock(sk);
7163 return 0;
7164}
7165
/* Dispatch one complete, reassembled L2CAP frame by destination CID.
 *
 * Signaling/connectionless/ATT/SMP traffic goes to the dedicated handlers;
 * anything else is looked up as a dynamic channel.  The skb (or ownership
 * of it) is always handed off or freed here.
 *
 * NOTE(review): in the default case l2cap_get_chan_by_scid() appears to
 * return the socket locked — bh_unlock_sock() is called here without a
 * matching visible lock; confirm against its definition.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct sock *sk;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless frames carry a 2-byte PSM prefix */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* Security Manager Protocol; a failure tears the link down */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
		if (sk) {
			if (sock_owned_by_user(sk)) {
				/* Socket busy in user context — defer via
				 * backlog; drop if the backlog is full */
				BT_DBG("backlog sk %p", sk);
				if (sk_add_backlog(sk, skb))
					kfree_skb(skb);
			} else
				l2cap_data_channel(sk, skb);

			bh_unlock_sock(sk);
		} else if (cid == L2CAP_CID_A2MP) {
			BT_DBG("A2MP");
			amp_conn_ind(conn, skb);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			kfree_skb(skb);
		}

		break;
	}
}
7227
7228/* ---- L2CAP interface with lower layer (HCI) ---- */
7229
7230static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
7231{
7232 int exact = 0, lm1 = 0, lm2 = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007233 register struct sock *sk;
7234 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007235
7236 if (type != ACL_LINK)
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007237 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007238
7239 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
7240
7241 /* Find listening sockets and check their link_mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007242 read_lock(&l2cap_sk_list.lock);
7243 sk_for_each(sk, node, &l2cap_sk_list.head) {
7244 if (sk->sk_state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007245 continue;
7246
7247 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007248 lm1 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007249 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007250 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007251 exact++;
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007252 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
7253 lm2 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007254 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007255 lm2 |= HCI_LM_MASTER;
7256 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007257 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007258 read_unlock(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007259
7260 return exact ? lm1 : lm2;
7261}
7262
7263static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7264{
Marcel Holtmann01394182006-07-03 10:02:46 +02007265 struct l2cap_conn *conn;
7266
Linus Torvalds1da177e2005-04-16 15:20:36 -07007267 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
7268
Ville Tervoacd7d372011-02-10 22:38:49 -03007269 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007270 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007271
7272 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007273 conn = l2cap_conn_add(hcon, status);
7274 if (conn)
7275 l2cap_conn_ready(conn);
Marcel Holtmann01394182006-07-03 10:02:46 +02007276 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007277 l2cap_conn_del(hcon, bt_err(status));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007278
7279 return 0;
7280}
7281
Marcel Holtmann2950f212009-02-12 14:02:50 +01007282static int l2cap_disconn_ind(struct hci_conn *hcon)
7283{
7284 struct l2cap_conn *conn = hcon->l2cap_data;
7285
7286 BT_DBG("hcon %p", hcon);
7287
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007288 if (hcon->type != ACL_LINK || !conn)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007289 return 0x13;
7290
7291 return conn->disc_reason;
7292}
7293
7294static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007295{
7296 BT_DBG("hcon %p reason %d", hcon, reason);
7297
Ville Tervoacd7d372011-02-10 22:38:49 -03007298 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007299 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007300
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007301 l2cap_conn_del(hcon, bt_err(reason));
Marcel Holtmann01394182006-07-03 10:02:46 +02007302
Linus Torvalds1da177e2005-04-16 15:20:36 -07007303 return 0;
7304}
7305
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007306static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007307{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007308 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007309 return;
7310
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007311 if (encrypt == 0x00) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007312 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
7313 l2cap_sock_clear_timer(sk);
7314 l2cap_sock_set_timer(sk, HZ * 5);
7315 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
7316 __l2cap_sock_close(sk, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007317 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007318 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
7319 l2cap_sock_clear_timer(sk);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007320 }
7321}
7322
/* HCI callback: authentication/encryption procedure completed for @hcon.
 *
 * @status:  0 on success, HCI error otherwise.
 * @encrypt: resulting encryption state of the link.
 *
 * Walks every channel on the connection (under the channel-list read lock,
 * each socket bh-locked in turn) and advances its state machine:
 *  - LE data channels: propagate security level, stop the SMP timer, mark
 *    the channel ready and notify SMP of the encryption result;
 *  - channels with a pending connect-request are skipped;
 *  - established channels just re-evaluate their encryption requirements;
 *  - BT_CONNECT channels send the (possibly AMP-routed) connect request on
 *    success, or arm a short teardown timer on failure;
 *  - BT_CONNECT2 channels answer the peer's pending connect request with
 *    success or security-block.
 * Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);

		/* LE (ATT) channels are driven by SMP, not by the BR/EDR
		 * security flow below */
		if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt)
				l2cap_pi(sk)->sec_level = hcon->sec_level;

			del_timer(&hcon->smp_timer);
			l2cap_chan_ready(sk);
			smp_link_encrypt_cmplt(conn, status, encrypt);

			bh_unlock_sock(sk);
			continue;
		}

		/* A connect request is already in flight — leave as is */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				l2cap_pi(sk)->conf_state |=
					L2CAP_CONF_CONNECT_PEND;
				/* Route through AMP when policy prefers it */
				if (l2cap_pi(sk)->amp_pref ==
						BT_AMP_POLICY_PREFER_AMP) {
					amp_create_physical(l2cap_pi(sk)->conn,
							sk);
				} else
					l2cap_send_conn_req(sk);
			} else {
				/* Security failed — schedule quick teardown */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				/* AMP-bound channels continue asynchronously;
				 * the response is sent later */
				if (l2cap_pi(sk)->amp_id) {
					amp_accept_physical(conn,
						l2cap_pi(sk)->amp_id, sk);
					bh_unlock_sock(sk);
					continue;
				}

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
7416
/* HCI callback: incoming ACL data fragment for @hcon.
 *
 * Reassembles L2CAP frames from ACL fragments using conn->rx_skb/rx_len.
 * ACL_START fragments must carry at least the Basic L2CAP header, from
 * which the total frame length is taken; continuation fragments are
 * appended until rx_len reaches zero, then the complete frame is passed
 * to l2cap_recv_frame().  Any framing violation marks the connection
 * unreliable (ECOMM) and resets reassembly state.
 *
 * The input skb is always consumed: either handed whole to
 * l2cap_recv_frame() (single-fragment fast path, early return) or copied
 * into rx_skb and freed at the shared drop label.  Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Lazily create the connection object, but only for BR/EDR
	 * controllers */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated — discard it */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		if (flags & ACL_CONT) {
			BT_ERR("Complete frame is incomplete "
					"(len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
		/* fall through to drop: the fragment was copied, the
		 * original skb can be freed */
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
7519
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007520static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007521{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007522 struct sock *sk;
7523 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007524
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007525 read_lock_bh(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007526
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007527 sk_for_each(sk, node, &l2cap_sk_list.head) {
7528 struct l2cap_pinfo *pi = l2cap_pi(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007529
Gustavo F. Padovan903d3432011-02-10 14:16:06 -02007530 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007531 batostr(&bt_sk(sk)->src),
7532 batostr(&bt_sk(sk)->dst),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007533 sk->sk_state, __le16_to_cpu(pi->psm),
7534 pi->scid, pi->dcid,
7535 pi->imtu, pi->omtu, pi->sec_level,
7536 pi->mode);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007537 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007538
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007539 read_unlock_bh(&l2cap_sk_list.lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007540
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007541 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007542}
7543
/* debugfs open hook: bind the seq_file single-shot renderer. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7548
/* File operations for the read-only /sys/kernel/debug/.../l2cap entry */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Dentry of the debugfs entry; NULL when debugfs is unavailable */
static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007557
/* Registration descriptor hooking L2CAP into the HCI core: connection
 * lifecycle, security and ACL data callbacks (plus AMP create/modify/
 * destroy confirmations). */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata,
	.create_cfm	= l2cap_create_cfm,
	.modify_cfm	= l2cap_modify_cfm,
	.destroy_cfm	= l2cap_destroy_cfm,
};
7571
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007572int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007573{
7574 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007575
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007576 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007577 if (err < 0)
7578 return err;
7579
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007580 _l2cap_wq = create_singlethread_workqueue("l2cap");
7581 if (!_l2cap_wq) {
7582 err = -ENOMEM;
7583 goto error;
7584 }
7585
Linus Torvalds1da177e2005-04-16 15:20:36 -07007586 err = hci_register_proto(&l2cap_hci_proto);
7587 if (err < 0) {
7588 BT_ERR("L2CAP protocol registration failed");
7589 bt_sock_unregister(BTPROTO_L2CAP);
7590 goto error;
7591 }
7592
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007593 if (bt_debugfs) {
7594 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
7595 bt_debugfs, NULL, &l2cap_debugfs_fops);
7596 if (!l2cap_debugfs)
7597 BT_ERR("Failed to create L2CAP debug file");
7598 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007599
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007600 if (amp_init() < 0) {
7601 BT_ERR("AMP Manager initialization failed");
7602 goto error;
7603 }
7604
Linus Torvalds1da177e2005-04-16 15:20:36 -07007605 return 0;
7606
7607error:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007608 destroy_workqueue(_l2cap_wq);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007609 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007610 return err;
7611}
7612
/* Module exit: tear down in reverse order of l2cap_init() — AMP manager,
 * debugfs entry, workqueue (flushed before destruction), HCI protocol
 * hooks, then the socket layer. */
void l2cap_exit(void)
{
	amp_exit();

	debugfs_remove(l2cap_debugfs);

	/* Drain pending work before the workqueue goes away */
	flush_workqueue(_l2cap_wq);
	destroy_workqueue(_l2cap_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
7627
/* Runtime-tunable module parameters (world-readable, root-writable) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_reconfig, bool, 0644);
MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");