blob: 3bb58f9c34a1413177696c64a719043c7af24633 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Gustavo F. Padovance5706b2010-07-13 11:57:11 -03004 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Gustavo F. Padovan5d8868f2010-07-16 16:18:39 -03005 Copyright (C) 2010 Google Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090017 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090022 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 SOFTWARE IS DISCLAIMED.
25*/
26
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020027/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30
31#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080032#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/list.h>
Marcel Holtmannbe9d1222005-11-08 09:57:38 -080044#include <linux/device.h>
Marcel Holtmannaef7d972010-03-21 05:27:45 +010045#include <linux/debugfs.h>
46#include <linux/seq_file.h>
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -030047#include <linux/uaccess.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030048#include <linux/crc16.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#include <linux/math64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <net/sock.h>
51
52#include <asm/system.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
Anderson Brigliaea370122011-06-07 18:46:31 -030058#include <net/bluetooth/smp.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059#include <net/bluetooth/amp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
/* Module-level switches; non-static so they are visible to the socket layer. */
int disable_ertm;	/* when set, ERTM/streaming are removed from the feature mask */
int enable_reconfig;

/* Locally supported L2CAP feature mask and fixed-channel bitmap (L2CAP + A2MP). */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_A2MP, };

/* Workqueue backing all ERTM delayed work (ack/retrans/monitor timers). */
struct workqueue_struct *_l2cap_wq;

/* Global list of L2CAP sockets, guarded by its own rwlock. */
struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};
72
73static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
74 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
75static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
76 struct l2cap_pinfo *pi, u16 icid, u16 result);
77static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
78 u16 icid, u16 result);
79
80static void l2cap_amp_move_setup(struct sock *sk);
81static void l2cap_amp_move_success(struct sock *sk);
82static void l2cap_amp_move_revert(struct sock *sk);
83
84static int l2cap_ertm_rx_queued_iframes(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085
Linus Torvalds1da177e2005-04-16 15:20:36 -070086static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
87 u8 code, u8 ident, u16 dlen, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070088static int l2cap_answer_move_poll(struct sock *sk);
89static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
90static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
91static void l2cap_chan_ready(struct sock *sk);
92static void l2cap_conn_del(struct hci_conn *hcon, int err);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -030093
Marcel Holtmann01394182006-07-03 10:02:46 +020094/* ---- L2CAP channels ---- */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
Gustavo F. Padovan4a6aa522011-05-17 14:34:52 -030096{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 struct sock *s;
98 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
99 if (l2cap_pi(s)->dcid == cid)
100 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200101 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700102 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200103}
104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700105/* Find channel with given DCID.
106 * Returns locked socket */
107static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
108 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200109{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700110 struct sock *s;
111 read_lock(&l->lock);
112 s = __l2cap_get_chan_by_dcid(l, cid);
113 if (s)
114 bh_lock_sock(s);
115 read_unlock(&l->lock);
116 return s;
117}
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120{
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->scid == cid)
124 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200125 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200127}
128
129/* Find channel with given SCID.
130 * Returns locked socket */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700131static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200132{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700133 struct sock *s;
134 read_lock(&l->lock);
135 s = __l2cap_get_chan_by_scid(l, cid);
136 if (s)
137 bh_lock_sock(s);
138 read_unlock(&l->lock);
139 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200140}
141
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200143{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
147 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200148 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149 return s;
150}
151
152static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153{
154 struct sock *s;
155 read_lock(&l->lock);
156 s = __l2cap_get_chan_by_ident(l, ident);
157 if (s)
158 bh_lock_sock(s);
159 read_unlock(&l->lock);
160 return s;
161}
162
/* Linear search of an skb queue for the ERTM I-frame carrying TxSeq 'seq'.
 * Returns the matching skb (still queued) or NULL if none matches. */
static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
						u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
Marcel Holtmann01394182006-07-03 10:02:46 +0200177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 u16 allocSize = 1;
179 int err = 0;
180 int i;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300181
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182 /* Actual allocated size must be a power of 2 */
183 while (allocSize && allocSize <= size)
184 allocSize <<= 1;
185 if (!allocSize)
186 return -ENOMEM;
Marcel Holtmann01394182006-07-03 10:02:46 +0200187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188 seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
189 if (!seq_list->list)
190 return -ENOMEM;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700192 seq_list->size = allocSize;
193 seq_list->mask = allocSize - 1;
194 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
195 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
196 for (i = 0; i < allocSize; i++)
197 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300198
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300199 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300200}
201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300203{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700204 kfree(seq_list->list);
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300205}
206
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
208 u16 seq)
209{
210 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
211}
212
/* Remove 'seq' from the list and return it.
 *
 * The list is a singly-linked chain stored inside a power-of-2 array:
 * list[x & mask] holds the sequence number that follows x, with the
 * final entry holding L2CAP_SEQ_LIST_TAIL and empty slots holding
 * L2CAP_SEQ_LIST_CLEAR. Removing a non-head entry therefore requires
 * walking from the head to find its predecessor.
 *
 * Returns L2CAP_SEQ_LIST_CLEAR when the list is empty or 'seq' is not
 * present. */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		BT_DBG("List empty");
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed quickly */
		BT_DBG("Remove head");
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* If the new head is the TAIL sentinel, the list is empty */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Non-head item must be found first */
		u16 prev = seq_list->head;
		BT_DBG("Find and remove");
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL) {
				BT_DBG("seq %d not in list", (int) seq);
				return L2CAP_SEQ_LIST_CLEAR;
			}
		}

		/* Splice 'seq' out: predecessor inherits its successor link */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
252
253static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
254{
255 return l2cap_seq_list_remove(seq_list, seq_list->head);
256}
257
258static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
259{
260 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
261 u16 i;
262 for (i = 0; i < seq_list->size; i++)
263 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
264
265 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
266 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
267 }
268}
269
270static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
271{
272 u16 mask = seq_list->mask;
273
274 BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
275
276 if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
277 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
278 seq_list->head = seq;
279 else
280 seq_list->list[seq_list->tail & mask] = seq;
281
282 seq_list->tail = seq;
283 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
284 }
285}
286
287static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
288{
289 u16 packed;
290
291 packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
292 L2CAP_CTRL_REQSEQ;
293 packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
294 L2CAP_CTRL_FINAL;
295
296 if (control->frame_type == 's') {
297 packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
298 L2CAP_CTRL_POLL;
299 packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
300 L2CAP_CTRL_SUPERVISE;
301 packed |= L2CAP_CTRL_FRAME_TYPE;
302 } else {
303 packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
304 L2CAP_CTRL_SAR;
305 packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
306 L2CAP_CTRL_TXSEQ;
307 }
308
309 return packed;
310}
311
312static void __get_enhanced_control(u16 enhanced,
313 struct bt_l2cap_control *control)
314{
315 control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
316 L2CAP_CTRL_REQSEQ_SHIFT;
317 control->final = (enhanced & L2CAP_CTRL_FINAL) >>
318 L2CAP_CTRL_FINAL_SHIFT;
319
320 if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
321 control->frame_type = 's';
322 control->poll = (enhanced & L2CAP_CTRL_POLL) >>
323 L2CAP_CTRL_POLL_SHIFT;
324 control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
325 L2CAP_CTRL_SUPERVISE_SHIFT;
326
327 control->sar = 0;
328 control->txseq = 0;
329 } else {
330 control->frame_type = 'i';
331 control->sar = (enhanced & L2CAP_CTRL_SAR) >>
332 L2CAP_CTRL_SAR_SHIFT;
333 control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
334 L2CAP_CTRL_TXSEQ_SHIFT;
335
336 control->poll = 0;
337 control->super = 0;
338 }
339}
340
341static u32 __pack_extended_control(struct bt_l2cap_control *control)
342{
343 u32 packed;
344
345 packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
346 L2CAP_EXT_CTRL_REQSEQ;
347 packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
348 L2CAP_EXT_CTRL_FINAL;
349
350 if (control->frame_type == 's') {
351 packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
352 L2CAP_EXT_CTRL_POLL;
353 packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
354 L2CAP_EXT_CTRL_SUPERVISE;
355 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
356 } else {
357 packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
358 L2CAP_EXT_CTRL_SAR;
359 packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
360 L2CAP_EXT_CTRL_TXSEQ;
361 }
362
363 return packed;
364}
365
366static void __get_extended_control(u32 extended,
367 struct bt_l2cap_control *control)
368{
369 control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
370 L2CAP_EXT_CTRL_REQSEQ_SHIFT;
371 control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
372 L2CAP_EXT_CTRL_FINAL_SHIFT;
373
374 if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
375 control->frame_type = 's';
376 control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
377 L2CAP_EXT_CTRL_POLL_SHIFT;
378 control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
379 L2CAP_EXT_CTRL_SUPERVISE_SHIFT;
380
381 control->sar = 0;
382 control->txseq = 0;
383 } else {
384 control->frame_type = 'i';
385 control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
386 L2CAP_EXT_CTRL_SAR_SHIFT;
387 control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
388 L2CAP_EXT_CTRL_TXSEQ_SHIFT;
389
390 control->poll = 0;
391 control->super = 0;
392 }
393}
394
/* Cancel a pending delayed ACK; harmless if none is queued. */
static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->ack_work);
}
400
401static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
402{
403 BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
404 if (!delayed_work_pending(&pi->ack_work)) {
405 queue_delayed_work(_l2cap_wq, &pi->ack_work,
406 msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
407 }
408}
409
/* Cancel a pending retransmission timeout; harmless if none is queued. */
static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->retrans_work);
}
415
416static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
417{
418 BT_DBG("pi %p", pi);
419 if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
420 __cancel_delayed_work(&pi->retrans_work);
421 queue_delayed_work(_l2cap_wq, &pi->retrans_work,
422 msecs_to_jiffies(pi->retrans_timeout));
423 }
424}
425
/* Cancel a pending monitor timeout; harmless if none is queued. */
static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->monitor_work);
}
431
432static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
433{
434 BT_DBG("pi %p", pi);
435 l2cap_ertm_stop_retrans_timer(pi);
436 __cancel_delayed_work(&pi->monitor_work);
437 if (pi->monitor_timeout) {
438 queue_delayed_work(_l2cap_wq, &pi->monitor_work,
439 msecs_to_jiffies(pi->monitor_timeout));
440 }
441}
442
443static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
Marcel Holtmann01394182006-07-03 10:02:46 +0200444{
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300445 u16 cid = L2CAP_CID_DYN_START;
Marcel Holtmann01394182006-07-03 10:02:46 +0200446
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300447 for (; cid < L2CAP_CID_DYN_END; cid++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700448 if (!__l2cap_get_chan_by_scid(l, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200449 return cid;
450 }
451
452 return 0;
453}
454
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700455static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300456{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700457 sock_hold(sk);
Gustavo F. Padovanf1b394a2011-06-03 00:19:47 -0300458
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700459 if (l->head)
460 l2cap_pi(l->head)->prev_c = sk;
461
462 l2cap_pi(sk)->next_c = l->head;
463 l2cap_pi(sk)->prev_c = NULL;
464 l->head = sk;
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300465}
466
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700467static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300468{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700469 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300470
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700471 write_lock_bh(&l->lock);
472 if (sk == l->head)
473 l->head = next;
474
475 if (next)
476 l2cap_pi(next)->prev_c = prev;
477 if (prev)
478 l2cap_pi(prev)->next_c = next;
479 write_unlock_bh(&l->lock);
480
481 __sock_put(sk);
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300482}
483
/* Attach a channel socket to a connection and assign its CIDs/MTUs
 * based on socket type and link type. Caller must hold the channel
 * list's write lock (this calls __l2cap_chan_link). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13 = "Remote device terminated connection"; default disconnect
	 * reason until something more specific happens */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (!l2cap_pi(sk)->fixed_channel &&
		(sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: force at least the LE default MTU
			 * and use the fixed LE data CID in both directions */
			if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
			if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;

			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else if (sk->sk_type == SOCK_RAW) {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}
	/* Otherwise, do not set scid/dcid/omtu. These will be set up
	 * by l2cap_fixed_channel_config()
	 */

	__l2cap_chan_link(l, sk);
}
528
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Detaches the channel from its connection and AMP state, moves the
 * socket to BT_CLOSED/SOCK_ZAPPED, notifies the accept parent (or the
 * socket itself), and purges all ERTM queues and pending delayed work.
 * 'err' (if non-zero) is reported through sk->sk_err. */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Fixed channels do not hold an hcon reference */
		if (!l2cap_pi(sk)->fixed_channel)
			hci_conn_put(conn->hcon);
	}

	/* Tear down any AMP association for this channel */
	if (l2cap_pi(sk)->ampcon) {
		l2cap_pi(sk)->ampcon->l2cap_data = NULL;
		l2cap_pi(sk)->ampcon = NULL;
		if (l2cap_pi(sk)->ampchan) {
			hci_chan_put(l2cap_pi(sk)->ampchan);
			/* Other channels may still share the hci_chan */
			if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
				l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
		}
		l2cap_pi(sk)->ampchan = NULL;
		l2cap_pi(sk)->amp_id = 0;
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending accept(): detach and wake the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop any partially reassembled SDU */
		if (l2cap_pi(sk)->sdu)
			kfree_skb(l2cap_pi(sk)->sdu);

		skb_queue_purge(SREJ_QUEUE(sk));

		/* Stop all ERTM delayed work */
		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}
}
586
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700587static inline u8 l2cap_get_auth_type(struct sock *sk)
Gustavo F. Padovan6e9e43f2011-04-28 17:55:53 -0300588{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700589 if (sk->sk_type == SOCK_RAW) {
590 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530591 case BT_SECURITY_HIGH:
592 return HCI_AT_DEDICATED_BONDING_MITM;
593 case BT_SECURITY_MEDIUM:
594 return HCI_AT_DEDICATED_BONDING;
595 default:
596 return HCI_AT_NO_BONDING;
597 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700598 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
599 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
600 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
Johan Hedberg8556edd32011-01-19 12:06:50 +0530601
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700602 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
Johan Hedberg8556edd32011-01-19 12:06:50 +0530603 return HCI_AT_NO_BONDING_MITM;
604 else
605 return HCI_AT_NO_BONDING;
606 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700607 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530608 case BT_SECURITY_HIGH:
609 return HCI_AT_GENERAL_BONDING_MITM;
610 case BT_SECURITY_MEDIUM:
611 return HCI_AT_GENERAL_BONDING;
612 default:
613 return HCI_AT_NO_BONDING;
614 }
615 }
616}
617
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200618/* Service level security */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700619static inline int l2cap_check_security(struct sock *sk)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200620{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700621 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100622 __u8 auth_type;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200623
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700624 auth_type = l2cap_get_auth_type(sk);
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100625
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700626 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
627 auth_type);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200628}
629
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700630u8 l2cap_get_ident(struct l2cap_conn *conn)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200631{
632 u8 id;
633
634 /* Get next available identificator.
635 * 1 - 128 are used by kernel.
636 * 129 - 199 are reserved.
637 * 200 - 254 are used by utilities like l2ping, etc.
638 */
639
640 spin_lock_bh(&conn->lock);
641
642 if (++conn->tx_ident > 128)
643 conn->tx_ident = 1;
644
645 id = conn->tx_ident;
646
647 spin_unlock_bh(&conn->lock);
648
649 return id;
650}
651
/* Compute the CRC-16 FCS over an outgoing frame (head plus any fragment
 * list) and write it into the last L2CAP_FCS_SIZE bytes of the final
 * fragment, which the frame already reserves for it. */
static void apply_fcs(struct sk_buff *skb)
{
	size_t len;
	u16 partial_crc;
	struct sk_buff *iter;
	struct sk_buff *final_frag = skb;

	/* When fragmented, the head is fully covered and the FCS bytes
	 * live in the last fragment; otherwise exclude them from the head */
	if (skb_has_frag_list(skb))
		len = skb_headlen(skb);
	else
		len = skb->len - L2CAP_FCS_SIZE;

	partial_crc = crc16(0, (u8 *) skb->data, len);

	skb_walk_frags(skb, iter) {
		len = iter->len;
		/* Last fragment: do not checksum the FCS placeholder */
		if (!iter->next)
			len -= L2CAP_FCS_SIZE;

		partial_crc = crc16(partial_crc, iter->data, len);
		final_frag = iter;
	}

	put_unaligned_le16(partial_crc,
		final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
}
678
679void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200680{
681 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200682 u8 flags;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200683
684 BT_DBG("code 0x%2.2x", code);
685
686 if (!skb)
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -0300687 return;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200688
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200689 if (lmp_no_flush_capable(conn->hcon->hdev))
690 flags = ACL_START_NO_FLUSH;
691 else
692 flags = ACL_START;
693
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700694 bt_cb(skb)->force_active = 1;
Jaikumar Ganesh514abe62011-05-23 18:06:04 -0700695
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700696 hci_send_acl(conn->hcon, NULL, skb, flags);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200697}
698
/* True when this channel has no outstanding Connect Request in flight. */
static inline int __l2cap_no_conn_pending(struct sock *sk)
{
	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
}
703
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700704static void l2cap_send_conn_req(struct sock *sk)
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300705{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700706 struct l2cap_conn_req req;
707 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
708 req.psm = l2cap_pi(sk)->psm;
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300709
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700710 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
Gustavo F. Padovan2ab25cd2009-10-03 02:34:40 -0300711
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700712 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
713 L2CAP_CONN_REQ, sizeof(req), &req);
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300714}
715
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700716static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300717{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700718 struct l2cap_create_chan_req req;
719 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
720 req.psm = l2cap_pi(sk)->psm;
721 req.amp_id = amp_id;
722
723 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
724 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
725
726 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
727 L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300728}
729
/* Drive channel establishment. If the feature-mask exchange has not yet
 * been started on this connection, kick it off (with a timeout) and wait;
 * once the exchange is done and security checks pass, either start AMP
 * physical link creation or send a plain Connect Request. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Exchange started but not finished: try again later */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			/* Channel policy may prefer routing over an AMP */
			if (l2cap_pi(sk)->amp_pref == BT_AMP_POLICY_PREFER_AMP)
				amp_create_physical(l2cap_pi(sk)->conn, sk);
			else
				l2cap_send_conn_req(sk);
		}
	} else {
		/* First channel on this connection: query the feature mask */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
760
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300761static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
762{
763 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -0300764 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300765 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
766
767 switch (mode) {
768 case L2CAP_MODE_ERTM:
769 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
770 case L2CAP_MODE_STREAMING:
771 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
772 default:
773 return 0x00;
774 }
775}
776
/* Send an L2CAP Disconnect Request for the channel behind @sk and move
 * the socket to BT_DISCONN, recording @err as the socket error.
 *
 * For ERTM channels all pending transmit state is torn down first
 * (tx/srej queues purged, ack/retransmit/monitor work cancelled) so no
 * timer or queued frame can fire after the request has gone out.
 */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Nothing queued for this channel should reach the air anymore. */
	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));

		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
802
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200804static void l2cap_conn_start(struct l2cap_conn *conn)
805{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700806 struct l2cap_chan_list *l = &conn->chan_list;
807 struct sock_del_list del, *tmp1, *tmp2;
808 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200809
810 BT_DBG("conn %p", conn);
811
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700812 INIT_LIST_HEAD(&del.list);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200813
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700814 read_lock(&l->lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300815
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700816 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200817 bh_lock_sock(sk);
818
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700819 if (sk->sk_type != SOCK_SEQPACKET &&
820 sk->sk_type != SOCK_STREAM) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200821 bh_unlock_sock(sk);
822 continue;
823 }
824
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700825 if (sk->sk_state == BT_CONNECT) {
826 if (!l2cap_check_security(sk) ||
827 !__l2cap_no_conn_pending(sk)) {
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300828 bh_unlock_sock(sk);
829 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +0200830 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300831
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700832 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
833 conn->feat_mask)
834 && l2cap_pi(sk)->conf_state &
835 L2CAP_CONF_STATE2_DEVICE) {
836 tmp1 = kzalloc(sizeof(struct sock_del_list),
837 GFP_ATOMIC);
838 tmp1->sk = sk;
839 list_add_tail(&tmp1->list, &del.list);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300840 bh_unlock_sock(sk);
841 continue;
842 }
843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700844 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846 if (l2cap_pi(sk)->amp_pref == BT_AMP_POLICY_PREFER_AMP)
847 amp_create_physical(l2cap_pi(sk)->conn, sk);
848 else
849 l2cap_send_conn_req(sk);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300850
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851 } else if (sk->sk_state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200852 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300853 char buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
855 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200856
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700857 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100858 if (bt_sk(sk)->defer_setup) {
859 struct sock *parent = bt_sk(sk)->parent;
860 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
861 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Nick Pellyf86e4b02010-04-08 16:23:32 -0700862 if (parent)
863 parent->sk_data_ready(parent, 0);
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100864
865 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700866 sk->sk_state = BT_CONFIG;
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100867 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
868 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
869 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200870 } else {
871 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
872 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
873 }
874
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700875 if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
876 l2cap_pi(sk)->amp_id) {
877 amp_accept_physical(conn,
878 l2cap_pi(sk)->amp_id, sk);
879 bh_unlock_sock(sk);
880 continue;
881 }
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300882
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700883 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
884 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
885
886 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300887 rsp.result != L2CAP_CR_SUCCESS) {
888 bh_unlock_sock(sk);
889 continue;
890 }
891
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700892 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300893 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700894 l2cap_build_conf_req(sk, buf), buf);
895 l2cap_pi(sk)->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200896 }
897
898 bh_unlock_sock(sk);
899 }
900
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700901 read_unlock(&l->lock);
902
903 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
904 bh_lock_sock(tmp1->sk);
905 __l2cap_sock_close(tmp1->sk, ECONNRESET);
906 bh_unlock_sock(tmp1->sk);
907 list_del(&tmp1->list);
908 kfree(tmp1);
909 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200910}
911
Ville Tervob62f3282011-02-10 22:38:50 -0300912/* Find socket with cid and source bdaddr.
913 * Returns closest match, locked.
914 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700915static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
Ville Tervob62f3282011-02-10 22:38:50 -0300916{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700917 struct sock *sk = NULL, *sk1 = NULL;
918 struct hlist_node *node;
Ville Tervob62f3282011-02-10 22:38:50 -0300919
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700920 read_lock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300921
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700922 sk_for_each(sk, node, &l2cap_sk_list.head) {
923 if (state && sk->sk_state != state)
Ville Tervob62f3282011-02-10 22:38:50 -0300924 continue;
925
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700926 if (l2cap_pi(sk)->scid == cid) {
Ville Tervob62f3282011-02-10 22:38:50 -0300927 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700928 if (!bacmp(&bt_sk(sk)->src, src))
929 break;
Ville Tervob62f3282011-02-10 22:38:50 -0300930
931 /* Closest match */
932 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700933 sk1 = sk;
Ville Tervob62f3282011-02-10 22:38:50 -0300934 }
935 }
Gustavo F. Padovan280f2942011-04-13 19:01:22 -0300936
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700937 read_unlock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300938
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700939 return node ? sk : sk1;
Ville Tervob62f3282011-02-10 22:38:50 -0300940}
941
/* An incoming LE link became ready: if a socket is listening on the LE
 * data CID, spawn a child socket for the connection, attach it to
 * @conn's channel list, mark it connected and wake the listener.
 * Silently returns when there is no listener; gives up (without error
 * to the peer) when the accept backlog is full or allocation fails.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	/* The child holds a reference on the HCI connection. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

	/* Success path intentionally falls through: the parent socket is
	 * still locked and must be released either way. */
clean:
	bh_unlock_sock(parent);
}
989
/* The underlying link is fully established: move every channel on
 * @conn forward.  LE channels first go through SMP security at the
 * strongest of the socket's and the link's pending security level;
 * connectionless sockets become connected immediately; outgoing
 * connection-oriented channels continue via l2cap_do_start().
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: maybe hand it to an LE listener first. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	if (l->head) {
		for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
			bh_lock_sock(sk);

			if (conn->hcon->type == LE_LINK) {
				/* Use the stricter of the channel's own and
				 * the link's pending security requirement. */
				u8 sec_level = l2cap_pi(sk)->sec_level;
				u8 pending_sec = conn->hcon->pending_sec_level;

				if (pending_sec > sec_level)
					sec_level = pending_sec;

				/* Channel becomes ready only once SMP says
				 * security is already satisfied. */
				if (smp_conn_security(conn, sec_level))
					l2cap_chan_ready(sk);

			} else if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				l2cap_sock_clear_timer(sk);
				sk->sk_state = BT_CONNECTED;
				sk->sk_state_change(sk);
			} else if (sk->sk_state == BT_CONNECT)
				l2cap_do_start(sk);

			bh_unlock_sock(sk);
		}
	} else if (conn->hcon->type == LE_LINK) {
		/* No channels yet on an LE link: still kick off security. */
		smp_conn_security(conn, BT_SECURITY_HIGH);
	}

	read_unlock(&l->lock);
}
1032
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001033/* Notify sockets that we cannot guaranty reliability anymore */
1034static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1035{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001036 struct l2cap_chan_list *l = &conn->chan_list;
1037 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001038
1039 BT_DBG("conn %p", conn);
1040
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001041 read_lock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001042
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001043 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1044 if (l2cap_pi(sk)->force_reliable)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001045 sk->sk_err = err;
1046 }
1047
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001048 read_unlock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001049}
1050
1051static void l2cap_info_timeout(unsigned long arg)
1052{
1053 struct l2cap_conn *conn = (void *) arg;
1054
Marcel Holtmann984947d2009-02-06 23:35:19 +01001055 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001056 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001057
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001058 l2cap_conn_start(conn);
1059}
1060
Vinicius Costa Gomesb19d5ce2011-06-14 13:37:41 -03001061static void security_timeout(unsigned long arg)
1062{
1063 struct l2cap_conn *conn = (void *) arg;
1064
1065 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1066}
1067
/* Create (or return the existing) L2CAP connection object for @hcon.
 *
 * Returns the existing conn if one is already attached, NULL on
 * allocation failure or when @status reports a failed HCI connection.
 * The conn is allocated atomically, linked into hcon->l2cap_data, and
 * gets either the SMP security timer (LE links) or the info-request
 * timer (BR/EDR links) armed-but-idle.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Reuse an existing conn; never create one for a failed link. */
	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may carry their own MTU; otherwise use the ACL MTU. */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	if (hcon->type == LE_LINK)
		setup_timer(&conn->security_timer, security_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason reported later (0x13 — presumably
	 * HCI "remote user terminated connection"; verify against spec). */
	conn->disc_reason = 0x13;

	return conn;
}
1108
/* Tear down L2CAP state associated with @hcon, failing channels with
 * @err.  @hcon may be either the BR/EDR link owning the conn or an AMP
 * link some channels were moved to: only channels riding on @hcon are
 * killed, and the conn object itself is freed only when @hcon is its
 * owning link.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	struct sock *next;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Any partially reassembled frame dies with the owning link. */
	if ((conn->hcon == hcon) && (conn->rx_skb))
		kfree_skb(conn->rx_skb);

	BT_DBG("conn->hcon %p", conn->hcon);

	/* Kill channels */
	for (sk = conn->chan_list.head; sk; ) {
		BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
		if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
			/* Save the successor before l2cap_chan_del unlinks
			 * this socket from the channel list. */
			next = l2cap_pi(sk)->next_c;
			bh_lock_sock(sk);
			l2cap_chan_del(sk, err);
			bh_unlock_sock(sk);
			l2cap_sock_kill(sk);
			sk = next;
		} else
			sk = l2cap_pi(sk)->next_c;
	}

	if (conn->hcon == hcon) {
		/* The info timer only ever runs after a request was sent. */
		if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
			del_timer_sync(&conn->info_timer);

		hcon->l2cap_data = NULL;

		kfree(conn);
	}
}
1148
1149static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
1150{
1151 struct l2cap_chan_list *l = &conn->chan_list;
1152 write_lock_bh(&l->lock);
1153 __l2cap_chan_add(conn, sk);
1154 write_unlock_bh(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001155}
1156
Linus Torvalds1da177e2005-04-16 15:20:36 -07001157/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158
1159/* Find socket with psm and source bdaddr.
1160 * Returns closest match.
1161 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001162static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001163{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001164 struct sock *sk = NULL, *sk1 = NULL;
1165 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001167 read_lock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001168
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001169 sk_for_each(sk, node, &l2cap_sk_list.head) {
1170 if (state && sk->sk_state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 continue;
1172
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001173 if (l2cap_pi(sk)->psm == psm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001174 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001175 if (!bacmp(&bt_sk(sk)->src, src))
1176 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177
1178 /* Closest match */
1179 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001180 sk1 = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001181 }
1182 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001183
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001184 read_unlock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001185
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001186 return node ? sk : sk1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001187}
1188
/* Establish the transport for @sk and begin L2CAP channel setup.
 *
 * Fixed channels reuse an already-established ACL connection (it is an
 * error if none exists).  Otherwise an LE or BR/EDR link is created via
 * hci_connect() as chosen by the destination CID.  The channel is then
 * added to the connection; if the link is already up, setup continues
 * immediately, otherwise it resumes from l2cap_conn_ready().
 *
 * Returns 0 on success or a negative errno (-EHOSTUNREACH when no
 * route to the destination, -ENOTCONN for a missing fixed-channel ACL,
 * -ENOMEM, or the hci_connect() error).
 */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	if (l2cap_pi(sk)->fixed_channel) {
		/* Fixed channels piggyback on existing ACL connections */
		hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
		if (!hcon || !hcon->l2cap_data) {
			err = -ENOTCONN;
			goto done;
		}

		conn = hcon->l2cap_data;
	} else {
		/* The LE data CID selects an LE link; anything else goes
		 * over a classic ACL link. */
		if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
			hcon = hci_connect(hdev, LE_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);
		else
			hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);

		if (IS_ERR(hcon)) {
			err = PTR_ERR(hcon);
			goto done;
		}

		conn = l2cap_conn_add(hcon, 0);
		if (!conn) {
			hci_conn_put(hcon);
			err = -ENOMEM;
			goto done;
		}
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk);

	/* Fixed channels, and LE channels on an already-up link, need no
	 * further signalling and are connected right away. */
	if ((l2cap_pi(sk)->fixed_channel) ||
			(l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
				hcon->state == BT_CONNECTED)) {
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		sk->sk_state = BT_CONNECT;
		l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
		sk->sk_state_change(sk);

		if (hcon->state == BT_CONNECTED) {
			if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Connectionless: done once security holds. */
				l2cap_sock_clear_timer(sk);
				if (l2cap_check_security(sk)) {
					sk->sk_state = BT_CONNECTED;
					sk->sk_state_change(sk);
				}
			} else
				l2cap_do_start(sk);
		}
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1275
/* Block (interruptibly) until all transmitted ERTM frames have been
 * acknowledged and the local transmit queue has drained, or until the
 * connection goes away, a signal arrives, or a socket error is set.
 *
 * Called with the socket locked; the lock is dropped around each
 * sleep.  Returns 0 on success, a signal errno, or the socket error.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
		atomic_read(&l2cap_pi(sk)->ertm_queued)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm the poll interval after each full timeout. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1307
/* Work-queue handler: resume ERTM transmission for the channel whose
 * l2cap_pinfo embeds this work item (queued by l2cap_skb_destructor
 * once the in-flight frame count drops below the low-water mark).
 */
static void l2cap_ertm_tx_worker(struct work_struct *work)
{
	struct l2cap_pinfo *pi =
			container_of(work, struct l2cap_pinfo, tx_work);
	/* l2cap_pinfo is the socket's protocol area, so the pinfo pointer
	 * doubles as the sock pointer. */
	struct sock *sk = (struct sock *)pi;
	BT_DBG("%p", pi);

	lock_sock(sk);
	l2cap_ertm_send(sk);
	release_sock(sk);
}
1319
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001320static void l2cap_skb_destructor(struct sk_buff *skb)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001321{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001322 struct sock *sk = skb->sk;
1323 int queued;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001324
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001325 queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
1326 if (queued < L2CAP_MIN_ERTM_QUEUED)
1327 queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001328}
1329
/* Hand one outbound frame to the HCI layer, routing it over the AMP
 * link when the channel has one and is in a stable move state, and
 * over the BR/EDR link otherwise.  Consumes @skb in all cases (frames
 * for a vanished AMP channel are dropped).
 */
void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
			pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
		BT_DBG("Sending on AMP connection %p %p",
			pi->ampcon, pi->ampchan);
		if (pi->ampchan)
			hci_send_acl(pi->ampcon, pi->ampchan, skb,
					ACL_COMPLETE);
		else
			/* AMP channel went away: drop the frame. */
			kfree_skb(skb);
	} else {
		u16 flags;

		bt_cb(skb)->force_active = pi->force_active;
		BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);

		/* Honour the non-flushable request only on controllers
		 * that support non-flushable packet boundary flags. */
		if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
				!l2cap_pi(sk)->flushable)
			flags = ACL_START_NO_FLUSH;
		else
			flags = ACL_START;

		hci_send_acl(pi->conn->hcon, NULL, skb, flags);
	}
}
1360
/* Transmit as many queued ERTM I-frames as the current window allows.
 *
 * Stops when the send queue empties, the peer's transmit window or the
 * local in-flight limit is reached, or the tx state machine leaves the
 * XMIT state.  Each frame's control field (and FCS, if enabled) is
 * stamped immediately before a clone is handed to the HCI layer; the
 * original stays queued for possible retransmission.
 *
 * Returns the number of frames sent, 0 when sending is not currently
 * possible, or -ENOTCONN.  Called with the socket locked.
 */
int l2cap_ertm_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p", sk);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer asked us to hold off (RNR). */
	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return 0;

	/* No new traffic while an AMP channel move is in progress. */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
		atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
		(pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {

		skb = sk->sk_send_head;

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback a pending F-bit on this frame. */
		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
			control->final = 1;
			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
		}
		control->reqseq = pi->buffer_seq;
		pi->last_acked_seq = pi->buffer_seq;
		control->txseq = pi->next_tx_seq;

		/* Write the control field in the negotiated format. */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		   */
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		/* The destructor throttles the queue: it decrements
		 * ertm_queued and reschedules the tx worker. */
		tx_skb->sk = sk;
		tx_skb->destructor = l2cap_skb_destructor;
		atomic_inc(&pi->ertm_queued);

		l2cap_do_send(sk, tx_skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		l2cap_ertm_start_retrans_timer(pi);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->unacked_frames += 1;
		pi->frames_sent += 1;
		sent += 1;

		/* Advance the send head; the frame itself stays queued
		 * until acknowledged. */
		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
		(int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
		atomic_read(&pi->ertm_queued));

	return sent;
}
1440
/* Transmit @skbs in streaming mode: frames are stamped with sequence
 * numbers and sent immediately, with no retransmission copy kept (the
 * queue is drained completely).  Unlike ERTM there is no window or
 * acknowledgement handling.
 *
 * Returns 0 on success, -ENOTCONN if the socket is not connected, or
 * 0 without sending while an AMP move is in progress (the frames stay
 * spliced on the tx queue in that case only if splice happened; here
 * the early return precedes the splice).
 */
int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p, skbs %p", sk, skbs);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* No new traffic while an AMP channel move is in progress. */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));

	BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
	while (!skb_queue_empty(TX_QUEUE(sk))) {

		skb = skb_dequeue(TX_QUEUE(sk));

		BT_DBG("skb %p", skb);

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		BT_DBG("control %p", control);

		/* Streaming mode never acknowledges; reqseq is always 0. */
		control->reqseq = 0;
		control->txseq = pi->next_tx_seq;

		/* Write the control field in the negotiated format. */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		l2cap_do_send(sk, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->frames_sent += 1;
		sent += 1;
	}

	BT_DBG("Sent %d", sent);

	return 0;
}
1498
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001499static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001500{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001501 while (len > 0) {
1502 if (iv->iov_len) {
1503 int copy = min_t(unsigned int, len, iv->iov_len);
1504 memcpy(kdata, iv->iov_base, copy);
1505 len -= copy;
1506 kdata += copy;
1507 iv->iov_base += copy;
1508 iv->iov_len -= copy;
1509 }
1510 iv++;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001511 }
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001512
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001513 return 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001514}
1515
/* Copy @len bytes of message payload into @skb, chaining continuation
 * fragments on skb's frag_list when the payload exceeds the HCI MTU
 * (@count is the payload capacity of the first skb).  When @reseg is
 * nonzero the data source is kernel memory (msg->msg_iov actually
 * points at a struct kvec array) and allocations must not block.
 *
 * If this channel uses CRC16 FCS, tailroom for the FCS is reserved on
 * (or appended after) the last fragment so the FCS can be written in
 * later by the caller.
 *
 * Returns the number of payload bytes copied, or a negative error.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
					int len, int count, struct sk_buff *skb,
					int reseg)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;
	struct sk_buff *final;
	int err, sent = 0;

	BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
		msg, (int)len, (int)count, skb);

	if (!conn)
		return -ENOTCONN;

	/* When resegmenting, data is copied from kernel space */
	if (reseg) {
		err = memcpy_fromkvec(skb_put(skb, count),
				(struct kvec *) msg->msg_iov, count);
	} else {
		err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
					count);
	}

	if (err)
		return -EFAULT;

	sent += count;
	len -= count;
	final = skb;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		int skblen;
		count = min_t(unsigned int, conn->mtu, len);

		/* Add room for the FCS if it fits */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
			len + L2CAP_FCS_SIZE <= conn->mtu)
			skblen = count + L2CAP_FCS_SIZE;
		else
			skblen = count;

		/* Don't use bt_skb_send_alloc() while resegmenting, since
		 * it is not ok to block.
		 */
		if (reseg) {
			*frag = bt_skb_alloc(skblen, GFP_ATOMIC);
			if (*frag)
				skb_set_owner_w(*frag, sk);
		} else {
			*frag = bt_skb_send_alloc(sk, skblen,
					msg->msg_flags & MSG_DONTWAIT, &err);
		}

		if (!*frag)
			return -EFAULT;

		/* When resegmenting, data is copied from kernel space */
		if (reseg) {
			err = memcpy_fromkvec(skb_put(*frag, count),
						(struct kvec *) msg->msg_iov,
						count);
		} else {
			err = memcpy_fromiovec(skb_put(*frag, count),
						msg->msg_iov, count);
		}

		if (err)
			return -EFAULT;

		sent += count;
		len -= count;

		/* Track the last fragment; the FCS may need to go there */
		final = *frag;

		frag = &(*frag)->next;
	}

	if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
		if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
			/* FCS won't fit in the last fragment; chain one
			 * more skb to hold just the FCS.
			 */
			if (reseg) {
				*frag = bt_skb_alloc(L2CAP_FCS_SIZE,
						GFP_ATOMIC);
				if (*frag)
					skb_set_owner_w(*frag, sk);
			} else {
				*frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
						msg->msg_flags & MSG_DONTWAIT,
						&err);
			}

			if (!*frag)
				return -EFAULT;

			final = *frag;
		}

		/* Reserve the space; the FCS value is computed later */
		skb_put(final, L2CAP_FCS_SIZE);
	}

	return sent;
}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001621struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001622{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001623 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001624 struct sk_buff *skb;
1625 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1626 struct l2cap_hdr *lh;
1627
1628 BT_DBG("sk %p len %d", sk, (int)len);
1629
1630 count = min_t(unsigned int, (conn->mtu - hlen), len);
1631 skb = bt_skb_send_alloc(sk, count + hlen,
1632 msg->msg_flags & MSG_DONTWAIT, &err);
1633 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001634 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001635
1636 /* Create L2CAP header */
1637 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001638 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001639 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001640 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001641
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001642 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001643 if (unlikely(err < 0)) {
1644 kfree_skb(skb);
1645 return ERR_PTR(err);
1646 }
1647 return skb;
1648}
1649
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001650struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001651{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001652 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001653 struct sk_buff *skb;
1654 int err, count, hlen = L2CAP_HDR_SIZE;
1655 struct l2cap_hdr *lh;
1656
1657 BT_DBG("sk %p len %d", sk, (int)len);
1658
1659 count = min_t(unsigned int, (conn->mtu - hlen), len);
1660 skb = bt_skb_send_alloc(sk, count + hlen,
1661 msg->msg_flags & MSG_DONTWAIT, &err);
1662 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001663 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001664
1665 /* Create L2CAP header */
1666 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001667 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001668 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1669
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001670 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001671 if (unlikely(err < 0)) {
1672 kfree_skb(skb);
1673 return ERR_PTR(err);
1674 }
1675 return skb;
1676}
1677
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
1679 struct msghdr *msg, size_t len,
1680 u16 sdulen, int reseg)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001681{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001682 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001683 int err, count, hlen;
1684 int reserve = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001685 struct l2cap_hdr *lh;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001686 u8 fcs = l2cap_pi(sk)->fcs;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001687
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001688 if (l2cap_pi(sk)->extended_control)
1689 hlen = L2CAP_EXTENDED_HDR_SIZE;
1690 else
1691 hlen = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03001692
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001693 if (sdulen)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001694 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001695
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001696 if (fcs == L2CAP_FCS_CRC16)
1697 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001698
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001699 BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
1700 sk, msg, (int)len, (int)sdulen, hlen);
1701
1702 count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
1703
1704 /* Allocate extra headroom for Qualcomm PAL. This is only
1705 * necessary in two places (here and when creating sframes)
1706 * because only unfragmented iframes and sframes are sent
1707 * using AMP controllers.
1708 */
1709 if (l2cap_pi(sk)->ampcon &&
1710 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1711 reserve = BT_SKB_RESERVE_80211;
1712
1713 /* Don't use bt_skb_send_alloc() while resegmenting, since
1714 * it is not ok to block.
1715 */
1716 if (reseg) {
1717 skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
1718 if (skb)
1719 skb_set_owner_w(skb, sk);
1720 } else {
1721 skb = bt_skb_send_alloc(sk, count + hlen + reserve,
1722 msg->msg_flags & MSG_DONTWAIT, &err);
1723 }
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001724 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001725 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001726
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001727 if (reserve)
1728 skb_reserve(skb, reserve);
1729
1730 bt_cb(skb)->control.fcs = fcs;
1731
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001732 /* Create L2CAP header */
1733 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001734 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1735 lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001736
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001737 /* Control header is populated later */
1738 if (l2cap_pi(sk)->extended_control)
1739 put_unaligned_le32(0, skb_put(skb, 4));
1740 else
1741 put_unaligned_le16(0, skb_put(skb, 2));
1742
1743 if (sdulen)
1744 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1745
1746 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001747 if (unlikely(err < 0)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001748 BT_DBG("err %d", err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001749 kfree_skb(skb);
1750 return ERR_PTR(err);
1751 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001752
1753 bt_cb(skb)->retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001754 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001755}
1756
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001757static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001758{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001759 struct l2cap_pinfo *pi;
1760 struct sk_buff *acked_skb;
1761 u16 ackseq;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001762
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001763 BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001764
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001765 pi = l2cap_pi(sk);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001766
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001767 if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
1768 return;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001769
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001770 BT_DBG("expected_ack_seq %d, unacked_frames %d",
1771 (int) pi->expected_ack_seq, (int) pi->unacked_frames);
1772
1773 for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
1774 ackseq = __next_seq(ackseq, pi)) {
1775
1776 acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
1777 if (acked_skb) {
1778 skb_unlink(acked_skb, TX_QUEUE(sk));
1779 kfree_skb(acked_skb);
1780 pi->unacked_frames--;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001781 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001782 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001783
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001784 pi->expected_ack_seq = reqseq;
1785
1786 if (pi->unacked_frames == 0)
1787 l2cap_ertm_stop_retrans_timer(pi);
1788
1789 BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001790}
1791
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001792static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001793{
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001794 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001795 int len;
1796 int reserve = 0;
1797 struct l2cap_hdr *lh;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001798
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001799 if (l2cap_pi(sk)->extended_control)
1800 len = L2CAP_EXTENDED_HDR_SIZE;
1801 else
1802 len = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001803
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001804 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1805 len += L2CAP_FCS_SIZE;
1806
1807 /* Allocate extra headroom for Qualcomm PAL */
1808 if (l2cap_pi(sk)->ampcon &&
1809 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1810 reserve = BT_SKB_RESERVE_80211;
1811
1812 skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);
1813
1814 if (!skb)
1815 return ERR_PTR(-ENOMEM);
1816
1817 if (reserve)
1818 skb_reserve(skb, reserve);
1819
1820 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1821 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1822 lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);
1823
1824 if (l2cap_pi(sk)->extended_control)
1825 put_unaligned_le32(control, skb_put(skb, 4));
1826 else
1827 put_unaligned_le16(control, skb_put(skb, 2));
1828
1829 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1830 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1831 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001832 }
1833
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001834 return skb;
1835}
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001836
/* Build and transmit a single S-frame described by @control.
 * Handles piggybacking a pending F-bit, RNR-sent bookkeeping, and
 * ack-timer management for acknowledging frames (non-SREJ).
 */
static void l2cap_ertm_send_sframe(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;
	u32 control_field;

	BT_DBG("sk %p, control %p", sk, control);

	if (control->frame_type != 's')
		return;

	pi = l2cap_pi(sk);

	/* S-frames may not go out mid-AMP-move, except while the move
	 * is in preparation or resegmentation.
	 */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
		pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		BT_DBG("AMP error - attempted S-Frame send during AMP move");
		return;
	}

	/* A pending F-bit rides on the first non-poll frame sent */
	if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
		control->final = 1;
		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
	}

	/* Track whether the peer was last told that we are busy */
	if (control->super == L2CAP_SFRAME_RR)
		pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
	else if (control->super == L2CAP_SFRAME_RNR)
		pi->conn_state |= L2CAP_CONN_SENT_RNR;

	/* Non-SREJ S-frames acknowledge reqseq, so the delayed ack is
	 * no longer needed; SREJ does not advance the ack state.
	 */
	if (control->super != L2CAP_SFRAME_SREJ) {
		pi->last_acked_seq = control->reqseq;
		l2cap_ertm_stop_ack_timer(pi);
	}

	BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
		(int) control->final, (int) control->poll,
		(int) control->super);

	if (pi->extended_control)
		control_field = __pack_extended_control(control);
	else
		control_field = __pack_enhanced_control(control);

	skb = l2cap_create_sframe_pdu(sk, control_field);
	if (!IS_ERR(skb))
		l2cap_do_send(sk, skb);
}
1886
/* Acknowledge received I-frames.  Prefers to piggyback the ack on
 * pending outbound I-frames; otherwise sends an RR (or RNR while
 * locally busy).  An explicit ack is forced once the count of
 * unacknowledged received frames reaches 3/4 of the tx window;
 * below that, acks are batched via the ack timer.
 */
static void l2cap_ertm_send_ack(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control control;
	u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
	int threshold;

	BT_DBG("sk %p", sk);
	BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
		(int)pi->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';

	if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
		pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
		/* Locally busy: tell the peer to stop with an RNR */
		l2cap_ertm_stop_ack_timer(pi);
		control.super = L2CAP_SFRAME_RNR;
		control.reqseq = pi->buffer_seq;
		l2cap_ertm_send_sframe(sk, &control);
	} else {
		if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
			l2cap_ertm_send(sk);
			/* If any i-frames were sent, they included an ack */
			if (pi->buffer_seq == pi->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = pi->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
			threshold);

		if (frames_to_ack >= threshold) {
			l2cap_ertm_stop_ack_timer(pi);
			control.super = L2CAP_SFRAME_RR;
			control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &control);
			frames_to_ack = 0;
		}

		/* Otherwise defer and batch remaining acks on the timer */
		if (frames_to_ack)
			l2cap_ertm_start_ack_timer(pi);
	}
}
1937
1938static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
1939{
1940 struct l2cap_pinfo *pi;
1941 struct bt_l2cap_control control;
1942
1943 BT_DBG("sk %p, poll %d", sk, (int) poll);
1944
1945 pi = l2cap_pi(sk);
1946
1947 memset(&control, 0, sizeof(control));
1948 control.frame_type = 's';
1949 control.poll = poll;
1950
1951 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
1952 control.super = L2CAP_SFRAME_RNR;
1953 else
1954 control.super = L2CAP_SFRAME_RR;
1955
1956 control.reqseq = pi->buffer_seq;
1957 l2cap_ertm_send_sframe(sk, &control);
1958}
1959
/* Respond to a poll (P-bit) from the peer.  Marks the F-bit pending
 * so the next frame sent (an RNR, a queued I-frame, or a final RR)
 * carries it, and clears the remote-busy condition so queued
 * I-frames can flow again.
 */
static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.final = 1;
	control.reqseq = pi->buffer_seq;
	/* SEND_FBIT is cleared by l2cap_ertm_send_sframe()/l2cap_ertm_send()
	 * once a frame actually carries the F-bit.
	 */
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control.super = L2CAP_SFRAME_RNR;
		l2cap_ertm_send_sframe(sk, &control);
	}

	/* Restart retransmissions for frames outstanding from before
	 * the peer reported busy.
	 */
	if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
		(pi->unacked_frames > 0))
		l2cap_ertm_start_retrans_timer(pi);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Send pending iframes */
	l2cap_ertm_send(sk);

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SFRAME_RR;
		l2cap_ertm_send_sframe(sk, &control);
	}
}
1997
1998static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
1999{
2000 struct bt_l2cap_control control;
2001 struct l2cap_pinfo *pi;
2002 u16 seq;
2003
2004 BT_DBG("sk %p, txseq %d", sk, (int)txseq);
2005
2006 pi = l2cap_pi(sk);
2007 memset(&control, 0, sizeof(control));
2008 control.frame_type = 's';
2009 control.super = L2CAP_SFRAME_SREJ;
2010
2011 for (seq = pi->expected_tx_seq; seq != txseq;
2012 seq = __next_seq(seq, pi)) {
2013 if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
2014 control.reqseq = seq;
2015 l2cap_ertm_send_sframe(sk, &control);
2016 l2cap_seq_list_append(&pi->srej_list, seq);
2017 }
2018 }
2019
2020 pi->expected_tx_seq = __next_seq(txseq, pi);
2021}
2022
2023static void l2cap_ertm_send_srej_tail(struct sock *sk)
2024{
2025 struct bt_l2cap_control control;
2026 struct l2cap_pinfo *pi;
2027
2028 BT_DBG("sk %p", sk);
2029
2030 pi = l2cap_pi(sk);
2031
2032 if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2033 return;
2034
2035 memset(&control, 0, sizeof(control));
2036 control.frame_type = 's';
2037 control.super = L2CAP_SFRAME_SREJ;
2038 control.reqseq = pi->srej_list.tail;
2039 l2cap_ertm_send_sframe(sk, &control);
2040}
2041
/* Re-send SREJ frames for every sequence number still outstanding on
 * the SREJ list, stopping early if @txseq is reached.  Entries are
 * popped and re-appended so the list contents are preserved; the
 * initial head is captured to guarantee a single pass.
 */
static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
{
	struct bt_l2cap_control control;
	struct l2cap_pinfo *pi;
	u16 initial_head;
	u16 seq;

	BT_DBG("sk %p, txseq %d", sk, (int) txseq);

	pi = l2cap_pi(sk);
	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.super = L2CAP_SFRAME_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = pi->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&pi->srej_list);
		if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
			break;

		control.reqseq = seq;
		l2cap_ertm_send_sframe(sk, &control);
		l2cap_seq_list_append(&pi->srej_list, seq);
	} while (pi->srej_list.head != initial_head);
}
2069
2070static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
2071{
2072 struct l2cap_pinfo *pi = l2cap_pi(sk);
2073 BT_DBG("sk %p", sk);
2074
2075 pi->expected_tx_seq = pi->buffer_seq;
2076 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
2077 skb_queue_purge(SREJ_QUEUE(sk));
2078 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2079}
2080
/* ERTM transmit state machine: XMIT state (normal transmission).
 * Handles new data, local-busy enter/exit (including the AMP-move
 * handshake that may be waiting on it), reqseq acknowledgements,
 * explicit polls, and retransmission-timer expiry.
 */
static int l2cap_ertm_tx_state_xmit(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		/* Queue the new frames and transmit what the window allows */
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		l2cap_ertm_send(sk);
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* send_ack emits RNR while LOCAL_BUSY is set */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
			/* An AMP channel move was blocked on local busy;
			 * continue the move handshake now.
			 */
			if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
						pi->amp_move_cmd_ident,
						pi->dcid,
						L2CAP_MOVE_CHAN_SUCCESS);
			}
			break;
		}

		if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
			(pi->conn_state & L2CAP_CONN_SENT_RNR)) {
			struct bt_l2cap_control local_control;

			/* We previously reported busy to the peer; poll
			 * with RR to resume and wait for the F-bit reply.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		/* Poll the peer and wait in WAIT_F for the F-bit reply */
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		l2cap_ertm_stop_ack_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
		/* Retransmission timeout: poll to learn the peer's state */
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2179
/* ERTM transmit state machine: WAIT_F state (a poll is outstanding
 * and we are waiting for a frame with the F-bit set).  New data is
 * queued but not sent; the monitor timer re-polls up to max_tx times
 * before the connection is dropped.
 */
static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);
		/* Queue data, but don't send. */
		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* send_ack emits RNR while LOCAL_BUSY is set */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
			struct bt_l2cap_control local_control;
			/* We previously reported busy; re-poll with RR now
			 * that we can receive again.
			 */
			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);

		/* Fall through */

	case L2CAP_ERTM_EVENT_RECV_FBIT:
		if (control && control->final) {
			/* Poll answered: return to normal transmission */
			l2cap_ertm_stop_monitor_timer(pi);
			if (pi->unacked_frames > 0)
				l2cap_ertm_start_retrans_timer(pi);
			pi->retry_count = 0;
			pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
			BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
		}
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		/* Ignore */
		break;
	case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
		/* max_tx == 0 means retry forever */
		if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
			l2cap_ertm_send_rr_or_rnr(sk, 1);
			l2cap_ertm_start_monitor_timer(pi);
			pi->retry_count += 1;
		} else
			l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
		break;
	default:
		break;
	}

	return err;
}
2262
2263int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
2264 struct sk_buff_head *skbs, u8 event)
2265{
2266 struct l2cap_pinfo *pi;
2267 int err = 0;
2268
2269 BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
2270 sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
2271
2272 pi = l2cap_pi(sk);
2273
2274 switch (pi->tx_state) {
2275 case L2CAP_ERTM_TX_STATE_XMIT:
2276 err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
2277 break;
2278 case L2CAP_ERTM_TX_STATE_WAIT_F:
2279 err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
2280 break;
2281 default:
2282 /* Ignore event */
2283 break;
2284 }
2285
2286 return err;
2287}
2288
/* Segment the SDU in @msg (@len bytes) into one or more ERTM I-frame
 * PDUs appended to @seg_queue.  The PDU payload size is limited by
 * the HCI MTU, the BR/EDR radio packet size (when no AMP controller
 * is in use), and the remote's MPS.  When @reseg is nonzero, frames
 * are rebuilt from kernel-space data with non-blocking allocations.
 *
 * Returns 0 on success; on failure the queue is purged and a
 * negative error is returned.
 */
int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
			struct msghdr *msg, size_t len, int reseg)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = l2cap_pi(sk)->conn->mtu;

	/* Constrain BR/EDR PDU size to fit within the largest radio packet */
	if (!l2cap_pi(sk)->ampcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);

	if (len <= pdu_len) {
		/* SDU fits in one PDU: no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len) {
		skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);

		BT_DBG("iframe skb %p", skb);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Continuation PDUs carry no SDU length field,
			 * so they can hold a little more payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2357
2358static inline int is_initial_frame(u8 sar)
2359{
2360 return (sar == L2CAP_SAR_UNSEGMENTED ||
2361 sar == L2CAP_SAR_START);
2362}
2363
2364static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
2365 size_t veclen)
2366{
2367 struct sk_buff *frag_iter;
2368
2369 BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
2370
2371 if (iv->iov_len + skb->len > veclen)
2372 return -ENOMEM;
2373
2374 memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
2375 iv->iov_len += skb->len;
2376
2377 skb_walk_frags(skb, frag_iter) {
2378 if (iv->iov_len + skb->len > veclen)
2379 return -ENOMEM;
2380
2381 BT_DBG("Copying %d bytes", (int)frag_iter->len);
2382 memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
2383 frag_iter->len);
2384 iv->iov_len += frag_iter->len;
2385 }
2386
2387 return 0;
2388}
2389
/* Rebuild the PDUs on @queue to match the current channel configuration.
 *
 * Used after an AMP channel move: each SDU is reassembled from its
 * existing PDUs into a flat temporary buffer, then re-segmented through
 * l2cap_segment_sdu() with the new MTU, header format and FCS settings.
 * On error the queue is left empty and a negative errno is returned;
 * on success 0 is returned and @queue holds the new PDUs.
 */
int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
{
	void *buf;
	int buflen;
	int err = 0;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iv;
	struct sk_buff_head old_frames;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (skb_queue_empty(queue))
		return 0;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = (struct iovec *) &iv;

	/* Worst-case reassembled SDU: outgoing MTU plus a trailing FCS. */
	buflen = pi->omtu + L2CAP_FCS_SIZE;
	buf = kzalloc(buflen, GFP_TEMPORARY);

	if (!buf) {
		BT_DBG("Could not allocate resegmentation buffer");
		return -ENOMEM;
	}

	/* Move current frames off the original queue */
	__skb_queue_head_init(&old_frames);
	skb_queue_splice_tail_init(queue, &old_frames);

	while (!skb_queue_empty(&old_frames)) {
		struct sk_buff_head current_sdu;
		u8 original_sar;

		/* Reassemble each SDU from one or more PDUs */

		iv.iov_base = buf;
		iv.iov_len = 0;

		skb = skb_peek(&old_frames);
		original_sar = bt_cb(skb)->control.sar;

		__skb_unlink(skb, &old_frames);

		/* Append data to SDU */
		if (pi->extended_control)
			skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
		else
			skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

		/* Start frames additionally carry a 2-byte SDU length. */
		if (original_sar == L2CAP_SAR_START)
			skb_pull(skb, L2CAP_SDULEN_SIZE);

		err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

		/* Drop the trailing FCS from the copied payload. */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
			iv.iov_len -= L2CAP_FCS_SIZE;

		/* Free skb */
		kfree_skb(skb);

		if (err)
			break;

		/* Collect the remaining continuation/end PDUs of this SDU
		 * until the next initial frame (or the queue empties).
		 */
		while (!skb_queue_empty(&old_frames) && !err) {
			/* Check next frame */
			skb = skb_peek(&old_frames);

			if (is_initial_frame(bt_cb(skb)->control.sar))
				break;

			__skb_unlink(skb, &old_frames);

			/* Append data to SDU */
			if (pi->extended_control)
				skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
			else
				skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

			/* NOTE(review): a SAR_START should not appear in this
			 * inner loop (it is an initial frame), so this pull
			 * looks like defensive dead code -- confirm.
			 */
			if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
				skb_pull(skb, L2CAP_SDULEN_SIZE);

			err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

			if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
				iv.iov_len -= L2CAP_FCS_SIZE;

			/* Free skb */
			kfree_skb(skb);
		}

		if (err)
			break;

		/* Segment data */

		__skb_queue_head_init(&current_sdu);

		/* skbs for the SDU were just freed, but the
		 * resegmenting process could produce more, smaller
		 * skbs due to smaller PDUs and reduced HCI MTU. The
		 * overhead from the sk_buff structs could put us over
		 * the sk_sndbuf limit.
		 *
		 * Since this code is running in response to a
		 * received poll/final packet, it cannot block.
		 * Therefore, memory allocation needs to be allowed by
		 * falling back to bt_skb_alloc() (with
		 * skb_set_owner_w() to maintain sk_wmem_alloc
		 * correctly).
		 */
		msg.msg_iovlen = iv.iov_len;
		err = l2cap_segment_sdu(sk, &current_sdu, &msg,
				msg.msg_iovlen, 1);

		if (err || skb_queue_empty(&current_sdu)) {
			BT_DBG("Error %d resegmenting data for socket %p",
					err, sk);
			__skb_queue_purge(&current_sdu);
			break;
		}

		/* Fix up first PDU SAR bits */
		if (!is_initial_frame(original_sar)) {
			BT_DBG("Changing SAR bits, %d PDUs",
				skb_queue_len(&current_sdu));
			skb = skb_peek(&current_sdu);

			if (skb_queue_len(&current_sdu) == 1) {
				/* Change SAR from 'unsegmented' to 'end' */
				bt_cb(skb)->control.sar = L2CAP_SAR_END;
			} else {
				struct l2cap_hdr *lh;
				size_t hdrlen;

				/* Change SAR from 'start' to 'continue' */
				bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;

				/* Start frames contain 2 bytes for
				 * sdulen and continue frames don't.
				 * Must rewrite header to eliminate
				 * sdulen and then adjust l2cap frame
				 * length.
				 */
				if (pi->extended_control)
					hdrlen = L2CAP_EXTENDED_HDR_SIZE;
				else
					hdrlen = L2CAP_ENHANCED_HDR_SIZE;

				memmove(skb->data + L2CAP_SDULEN_SIZE,
					skb->data, hdrlen);
				skb_pull(skb, L2CAP_SDULEN_SIZE);
				lh = (struct l2cap_hdr *)skb->data;
				lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
						L2CAP_SDULEN_SIZE);
			}
		}

		/* Add to queue */
		skb_queue_splice_tail(&current_sdu, queue);
	}

	__skb_queue_purge(&old_frames);
	if (err)
		__skb_queue_purge(queue);

	kfree(buf);

	BT_DBG("Queue resegmented, err=%d", err);
	return err;
}
2562
2563static void l2cap_resegment_worker(struct work_struct *work)
2564{
2565 int err = 0;
2566 struct l2cap_resegment_work *seg_work =
2567 container_of(work, struct l2cap_resegment_work, work);
2568 struct sock *sk = seg_work->sk;
2569
2570 kfree(seg_work);
2571
2572 BT_DBG("sk %p", sk);
2573 lock_sock(sk);
2574
2575 if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
2576 release_sock(sk);
2577 return;
2578 }
2579
2580 err = l2cap_resegment_queue(sk, TX_QUEUE(sk));
2581
2582 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
2583
2584 if (skb_queue_empty(TX_QUEUE(sk)))
2585 sk->sk_send_head = NULL;
2586 else
2587 sk->sk_send_head = skb_peek(TX_QUEUE(sk));
2588
2589 if (err)
2590 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
2591 else
2592 l2cap_ertm_send(sk);
2593
2594 release_sock(sk);
2595}
2596
2597static int l2cap_setup_resegment(struct sock *sk)
2598{
2599 struct l2cap_resegment_work *seg_work;
2600
2601 BT_DBG("sk %p", sk);
2602
2603 if (skb_queue_empty(TX_QUEUE(sk)))
2604 return 0;
2605
2606 seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
2607 if (!seg_work)
2608 return -ENOMEM;
2609
2610 INIT_WORK(&seg_work->work, l2cap_resegment_worker);
2611 seg_work->sk = sk;
2612
2613 if (!queue_work(_l2cap_wq, &seg_work->work)) {
2614 kfree(seg_work);
2615 return -ENOMEM;
2616 }
2617
2618 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
2619
2620 return 0;
2621}
2622
2623static inline int l2cap_rmem_available(struct sock *sk)
2624{
2625 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2626 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2627 return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
2628}
2629
2630static inline int l2cap_rmem_full(struct sock *sk)
2631{
2632 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2633 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2634 return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
2635}
2636
2637void l2cap_amp_move_init(struct sock *sk)
2638{
2639 BT_DBG("sk %p", sk);
2640
2641 if (!l2cap_pi(sk)->conn)
2642 return;
2643
2644 if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
2645 return;
2646
2647 if (l2cap_pi(sk)->amp_id == 0) {
2648 if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
2649 return;
2650 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2651 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
2652 amp_create_physical(l2cap_pi(sk)->conn, sk);
2653 } else {
2654 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2655 l2cap_pi(sk)->amp_move_state =
2656 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
2657 l2cap_pi(sk)->amp_move_id = 0;
2658 l2cap_amp_move_setup(sk);
2659 l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
2660 l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
2661 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
2662 }
2663}
2664
2665static void l2cap_chan_ready(struct sock *sk)
2666{
2667 struct sock *parent = bt_sk(sk)->parent;
2668
2669 BT_DBG("sk %p, parent %p", sk, parent);
2670
2671 l2cap_pi(sk)->conf_state = 0;
2672 l2cap_sock_clear_timer(sk);
2673
2674 if (!parent) {
2675 /* Outgoing channel.
2676 * Wake up socket sleeping on connect.
2677 */
2678 sk->sk_state = BT_CONNECTED;
2679 sk->sk_state_change(sk);
2680 } else {
2681 /* Incoming channel.
2682 * Wake up socket sleeping on accept.
2683 */
2684 parent->sk_data_ready(parent, 0);
2685 }
2686}
2687
Linus Torvalds1da177e2005-04-16 15:20:36 -07002688/* Copy frame to all raw sockets on that connection */
2689static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2690{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002691 struct l2cap_chan_list *l = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002692 struct sk_buff *nskb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002693 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002694
2695 BT_DBG("conn %p", conn);
2696
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002697 read_lock(&l->lock);
2698 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2699 if (sk->sk_type != SOCK_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700 continue;
2701
2702 /* Don't send frame to the socket it came from */
2703 if (skb->sk == sk)
2704 continue;
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03002705 nskb = skb_clone(skb, GFP_ATOMIC);
2706 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002707 continue;
2708
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002709 if (sock_queue_rcv_skb(sk, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002710 kfree_skb(nskb);
2711 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002712 read_unlock(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002713}
2714
2715/* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command as an skb chain.
 *
 * The first skb carries the L2CAP header, the command header and as much
 * of @data as fits in one ACL buffer; any remaining payload is attached
 * as headerless fragments on frag_list, each at most one ACL MTU long.
 * Returns the head skb, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;
	/* Fragment on the controller's ACL buffer size. */
	unsigned int mtu = conn->hcon->hdev->acl_mtu;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a distinct signalling channel ID. */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Remaining room in the first skb after the two headers. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain, including fragments attached so far. */
	kfree_skb(skb);
	return NULL;
}
2779
2780static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2781{
2782 struct l2cap_conf_opt *opt = *ptr;
2783 int len;
2784
2785 len = L2CAP_CONF_OPT_SIZE + opt->len;
2786 *ptr += len;
2787
2788 *type = opt->type;
2789 *olen = opt->len;
2790
2791 switch (opt->len) {
2792 case 1:
2793 *val = *((u8 *) opt->val);
2794 break;
2795
2796 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002797 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002798 break;
2799
2800 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002801 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002802 break;
2803
2804 default:
2805 *val = (unsigned long) opt->val;
2806 break;
2807 }
2808
2809 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2810 return len;
2811}
2812
Linus Torvalds1da177e2005-04-16 15:20:36 -07002813static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2814{
2815 struct l2cap_conf_opt *opt = *ptr;
2816
2817 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2818
2819 opt->type = type;
2820 opt->len = len;
2821
2822 switch (len) {
2823 case 1:
2824 *((u8 *) opt->val) = val;
2825 break;
2826
2827 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002828 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002829 break;
2830
2831 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002832 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002833 break;
2834
2835 default:
2836 memcpy(opt->val, (void *) val, len);
2837 break;
2838 }
2839
2840 *ptr += L2CAP_CONF_OPT_SIZE + len;
2841}
2842
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002843static void l2cap_ertm_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002844{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002845 struct delayed_work *delayed =
2846 container_of(work, struct delayed_work, work);
2847 struct l2cap_pinfo *pi =
2848 container_of(delayed, struct l2cap_pinfo, ack_work);
2849 struct sock *sk = (struct sock *)pi;
2850 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002851
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002852 BT_DBG("sk %p", sk);
2853
2854 if (!sk)
2855 return;
2856
2857 lock_sock(sk);
2858
2859 if (!l2cap_pi(sk)->conn) {
2860 release_sock(sk);
2861 return;
2862 }
2863
2864 frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
2865 l2cap_pi(sk)->last_acked_seq,
2866 l2cap_pi(sk));
2867
2868 if (frames_to_ack)
2869 l2cap_ertm_send_rr_or_rnr(sk, 0);
2870
2871 release_sock(sk);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002872}
2873
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002874static void l2cap_ertm_retrans_timeout(struct work_struct *work)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002875{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002876 struct delayed_work *delayed =
2877 container_of(work, struct delayed_work, work);
2878 struct l2cap_pinfo *pi =
2879 container_of(delayed, struct l2cap_pinfo, retrans_work);
2880 struct sock *sk = (struct sock *)pi;
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03002881
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002882 BT_DBG("sk %p", sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002883
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002884 if (!sk)
2885 return;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002886
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002887 lock_sock(sk);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03002888
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002889 if (!l2cap_pi(sk)->conn) {
2890 release_sock(sk);
2891 return;
2892 }
Gustavo F. Padovan39d5a3e2011-04-04 15:40:12 -03002893
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002894 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
2895 release_sock(sk);
2896}
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03002897
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002898static void l2cap_ertm_monitor_timeout(struct work_struct *work)
2899{
2900 struct delayed_work *delayed =
2901 container_of(work, struct delayed_work, work);
2902 struct l2cap_pinfo *pi =
2903 container_of(delayed, struct l2cap_pinfo, monitor_work);
2904 struct sock *sk = (struct sock *)pi;
2905
2906 BT_DBG("sk %p", sk);
2907
2908 if (!sk)
2909 return;
2910
2911 lock_sock(sk);
2912
2913 if (!l2cap_pi(sk)->conn) {
2914 release_sock(sk);
2915 return;
2916 }
2917
2918 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
2919
2920 release_sock(sk);
2921}
2922
2923static inline void l2cap_ertm_init(struct sock *sk)
2924{
2925 l2cap_pi(sk)->next_tx_seq = 0;
2926 l2cap_pi(sk)->expected_tx_seq = 0;
2927 l2cap_pi(sk)->expected_ack_seq = 0;
2928 l2cap_pi(sk)->unacked_frames = 0;
2929 l2cap_pi(sk)->buffer_seq = 0;
2930 l2cap_pi(sk)->frames_sent = 0;
2931 l2cap_pi(sk)->last_acked_seq = 0;
2932 l2cap_pi(sk)->sdu = NULL;
2933 l2cap_pi(sk)->sdu_last_frag = NULL;
2934 l2cap_pi(sk)->sdu_len = 0;
2935 atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
2936
2937 l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2938 l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2939
2940 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
2941 l2cap_pi(sk)->rx_state);
2942
2943 l2cap_pi(sk)->amp_id = 0;
2944 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
2945 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
2946 l2cap_pi(sk)->amp_move_reqseq = 0;
2947 l2cap_pi(sk)->amp_move_event = 0;
2948
2949 INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
2950 INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
2951 l2cap_ertm_retrans_timeout);
2952 INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
2953 l2cap_ertm_monitor_timeout);
2954 INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
2955 skb_queue_head_init(SREJ_QUEUE(sk));
2956 skb_queue_head_init(TX_QUEUE(sk));
2957
2958 l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
2959 l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
2960 l2cap_pi(sk)->remote_tx_win);
2961}
2962
2963void l2cap_ertm_destruct(struct sock *sk)
2964{
2965 l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
2966 l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
2967}
2968
/* Cancel all ERTM timers ahead of channel teardown. */
void l2cap_ertm_shutdown(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);
}
2975
/* Called after received data has been consumed by the socket layer.
 *
 * Flushes frames buffered during SREJ recovery and, if receive buffer
 * space has been freed, clears the local busy condition so the remote
 * peer may resume sending.
 */
void l2cap_ertm_recv_done(struct sock *sk)
{
	lock_sock(sk);

	/* Only meaningful for channels in ERTM mode. */
	if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) {
		release_sock(sk);
		return;
	}

	/* Consume any queued incoming frames and update local busy status */
	if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
			l2cap_ertm_rx_queued_iframes(sk))
		/* Processing the queued frames failed: drop the channel. */
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
	else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

	release_sock(sk);
}
2995
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03002996static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2997{
2998 switch (mode) {
2999 case L2CAP_MODE_STREAMING:
3000 case L2CAP_MODE_ERTM:
3001 if (l2cap_mode_supported(mode, remote_feat_mask))
3002 return mode;
3003 /* fall through */
3004 default:
3005 return L2CAP_MODE_BASIC;
3006 }
3007}
3008
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003009static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003010{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003011 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
3012 (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
3013 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3014 pi->extended_control = 1;
3015 } else {
3016 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3017 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3018
3019 pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
3020 pi->extended_control = 0;
3021 }
3022}
3023
/* Combine an existing aggregate flow spec with a newly added flow spec.
 *
 * @cur: current aggregate flow spec
 * @new: flow spec being added to the logical link
 * @agg: output - the combined flow spec
 *
 * max_sdu == 0xFFFF or sdu_arr_time == 0xFFFFFFFF denote "unknown rate".
 * An unknown-rate input makes the whole aggregate unknown; otherwise the
 * two rates (bytes/sec) are summed and expressed as a new arrival time
 * for the unchanged aggregate max_sdu.
 */
static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *new,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		/* current flow spec has known rate */
		if ((new->max_sdu == 0xFFFF) ||
				(new->sdu_arr_time == 0xFFFFFFFF)) {
			/* new fs has unknown rate, so aggregate is unknown */
			agg->max_sdu = 0xFFFF;
			agg->sdu_arr_time = 0xFFFFFFFF;
		} else {
			/* new fs has known rate, so aggregate is known */
			u64 cur_rate;
			u64 new_rate;
			cur_rate = cur->max_sdu * 1000000ULL;
			if (cur->sdu_arr_time)
				cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
			new_rate = new->max_sdu * 1000000ULL;
			if (new->sdu_arr_time)
				new_rate = div_u64(new_rate, new->sdu_arr_time);
			/* NOTE(review): if both max_sdu values are 0 the
			 * combined rate is 0 and this divides by zero --
			 * verify callers never supply zero-rate specs.
			 */
			cur_rate = cur_rate + new_rate;
			agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
				cur_rate);
		}
	}
}
3052
/* Fold this L2CAP channel's flow specs into the HCI logical channel's
 * aggregate and push the updated specs to the controller.
 *
 * Returns 1 when a modification was issued, 0 when the existing
 * aggregate already has unknown rates in both directions (in which case
 * adding another spec cannot change it).
 */
static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
{
	struct hci_ext_fs tx_fs;
	struct hci_ext_fs rx_fs;

	BT_DBG("chan %p", chan);

	/* 0xFFFF / 0xFFFFFFFF are the "unknown rate" wildcard values. */
	if (((chan->tx_fs.max_sdu == 0xFFFF) ||
			(chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
			((chan->rx_fs.max_sdu == 0xFFFF) ||
			(chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
		return 0;

	l2cap_aggregate_fs(&chan->tx_fs,
			(struct hci_ext_fs *) &pi->local_fs, &tx_fs);
	l2cap_aggregate_fs(&chan->rx_fs,
			(struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
	hci_chan_modify(chan, &tx_fs, &rx_fs);
	return 1;
}
3073
/* Remove a departing channel's flow spec from the aggregate.
 *
 * @cur: current aggregate flow spec
 * @old: flow spec being removed
 * @agg: output - the reduced flow spec
 *
 * Inverse of l2cap_aggregate_fs(): subtracts the old spec's data rate
 * and re-derives the arrival time for the unchanged aggregate max_sdu.
 * Unknown-rate aggregates (wildcard values) are passed through as-is.
 */
static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *old,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		u64 cur_rate;
		u64 old_rate;
		cur_rate = cur->max_sdu * 1000000ULL;
		if (cur->sdu_arr_time)
			cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
		old_rate = old->max_sdu * 1000000ULL;
		if (old->sdu_arr_time)
			old_rate = div_u64(old_rate, old->sdu_arr_time);
		/* NOTE(review): if old_rate >= cur_rate this underflows
		 * (u64 wrap) or divides by zero -- verify the departing
		 * spec was previously aggregated into cur.
		 */
		cur_rate = cur_rate - old_rate;
		agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
			cur_rate);
	}
}
3093
/* Remove this L2CAP channel's flow specs from the HCI logical channel's
 * aggregate and push the updated specs to the controller.
 *
 * Returns 1 when a modification was issued, 0 when the aggregate has
 * unknown rates in both directions and needs no update.
 */
static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
{
	struct hci_ext_fs tx_fs;
	struct hci_ext_fs rx_fs;

	BT_DBG("chan %p", chan);

	/* 0xFFFF / 0xFFFFFFFF are the "unknown rate" wildcard values. */
	if (((chan->tx_fs.max_sdu == 0xFFFF) ||
			(chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
			((chan->rx_fs.max_sdu == 0xFFFF) ||
			(chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
		return 0;

	l2cap_deaggregate_fs(&chan->tx_fs,
			(struct hci_ext_fs *) &pi->local_fs, &tx_fs);
	l2cap_deaggregate_fs(&chan->rx_fs,
			(struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
	hci_chan_modify(chan, &tx_fs, &rx_fs);
	return 1;
}
3114
3115static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct l2cap_pinfo *pi)
3116{
3117 struct hci_dev *hdev;
3118 struct hci_conn *hcon;
3119 struct hci_chan *chan;
3120
3121 hdev = hci_dev_get(A2MP_HCI_ID(amp_id));
3122 if (!hdev)
3123 return NULL;
3124
3125 BT_DBG("hdev %s", hdev->name);
3126
3127 hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
3128 if (!hcon)
3129 return NULL;
3130
3131 chan = hci_chan_list_lookup_id(hdev, hcon->handle);
3132 if (chan) {
3133 l2cap_aggregate(chan, pi);
3134 hci_chan_hold(chan);
3135 return chan;
3136 }
3137
3138 if (bt_sk(pi)->parent) {
3139 /* Incoming connection */
3140 chan = hci_chan_accept(hcon,
3141 (struct hci_ext_fs *) &pi->local_fs,
3142 (struct hci_ext_fs *) &pi->remote_fs);
3143 } else {
3144 /* Outgoing connection */
3145 chan = hci_chan_create(hcon,
3146 (struct hci_ext_fs *) &pi->local_fs,
3147 (struct hci_ext_fs *) &pi->remote_fs);
3148 }
3149 return chan;
3150}
3151
/* Build an L2CAP configuration request for this channel into @data.
 *
 * On the first request the channel mode may be downgraded based on the
 * remote feature mask; later requests keep the negotiated mode.  Options
 * emitted depend on the mode: MTU, RFC, extended window, extended flow
 * spec (AMP channels) and FCS.  Returns the number of bytes written.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode is only (re)selected on the very first config exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode was pinned by the user/device; don't renegotiate. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* An all-zero RFC option is only sent when the peer
		 * understands the RFC option at all.
		 */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		l2cap_setup_txwin(pi);
		/* The RFC option field is limited to the enhanced maximum;
		 * larger windows go in the extended-window option below.
		 */
		if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
			rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
		else
			rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->tx_win);
		}

		if (pi->amp_id) {
			/* default best effort extended flow spec */
			struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to drop the FCS when allowed on both sides. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to drop the FCS when allowed on both sides. */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3269
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003270
/* Build a configuration request to reconfigure an ERTM channel after an
 * AMP move.
 *
 * Timeouts are derived from the AMP controller's best-effort flush
 * timeout when moving to an AMP (amp_move_id set), or reset to defaults
 * when moving back to BR/EDR.  Only ERTM channels can be reconfigured;
 * other modes return -ECONNREFUSED.  Returns bytes written on success.
 */
static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;
	u32 be_flush_to;

	BT_DBG("sk %p", sk);

	/* convert to milliseconds, round up */
	be_flush_to = (pi->conn->hcon->hdev->amp_be_flush_to + 999) / 1000;

	switch (pi->mode) {
	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		if (pi->amp_move_id) {
			/* Moving to AMP: scale timeouts from the flush
			 * timeout of the new controller.
			 */
			rfc.retrans_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
			rfc.monitor_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
		} else {
			/* Moving back to BR/EDR: default timeouts. */
			rfc.retrans_timeout =
					cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
					cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		}
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		break;

	default:
		return -ECONNREFUSED;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {

		/* TODO assign fcs for br/edr based on socket config option */
		/* FCS is disabled on AMP (controller provides integrity),
		 * enabled on BR/EDR.
		 */
		if (pi->amp_move_id)
			pi->local_conf.fcs = L2CAP_FCS_NONE;
		else
			pi->local_conf.fcs = L2CAP_FCS_CRC16;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						pi->local_conf.fcs);

		pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3332
3333static int l2cap_parse_conf_req(struct sock *sk, void *data)
3334{
3335 struct l2cap_pinfo *pi = l2cap_pi(sk);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003336 struct l2cap_conf_rsp *rsp = data;
3337 void *ptr = rsp->data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003338 void *req = pi->conf_req;
3339 int len = pi->conf_len;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003340 int type, hint, olen;
3341 unsigned long val;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003342 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003343 struct l2cap_conf_ext_fs fs;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003344 u16 mtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003345 u16 result = L2CAP_CONF_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003347 BT_DBG("sk %p", sk);
Marcel Holtmann820ae1b2006-11-18 22:15:00 +01003348
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003349 while (len >= L2CAP_CONF_OPT_SIZE) {
3350 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003351
Gustavo F. Padovan589d2742009-04-20 01:31:07 -03003352 hint = type & L2CAP_CONF_HINT;
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07003353 type &= L2CAP_CONF_MASK;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003354
3355 switch (type) {
3356 case L2CAP_CONF_MTU:
Marcel Holtmann861d6882007-10-20 13:37:06 +02003357 mtu = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003358 break;
3359
3360 case L2CAP_CONF_FLUSH_TO:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003361 pi->flush_to = val;
3362 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3363 result = L2CAP_CONF_UNACCEPT;
3364 else
3365 pi->remote_conf.flush_to = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003366 break;
3367
3368 case L2CAP_CONF_QOS:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003369 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3370 result = L2CAP_CONF_UNACCEPT;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003371 break;
3372
Marcel Holtmann6464f352007-10-20 13:39:51 +02003373 case L2CAP_CONF_RFC:
3374 if (olen == sizeof(rfc))
3375 memcpy(&rfc, (void *) val, olen);
3376 break;
3377
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003378 case L2CAP_CONF_FCS:
3379 if (val == L2CAP_FCS_NONE)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003380 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
3381 pi->remote_conf.fcs = val;
3382 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003383
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003384 case L2CAP_CONF_EXT_FS:
3385 if (olen == sizeof(fs)) {
3386 pi->conf_state |= L2CAP_CONF_EFS_RECV;
3387 if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
3388 result = L2CAP_CONF_UNACCEPT;
3389 break;
3390 }
3391 memcpy(&fs, (void *) val, olen);
3392 if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
3393 result = L2CAP_CONF_FLOW_SPEC_REJECT;
3394 break;
3395 }
3396 pi->remote_conf.flush_to =
3397 le32_to_cpu(fs.flush_to);
3398 pi->remote_fs.id = fs.id;
3399 pi->remote_fs.type = fs.type;
3400 pi->remote_fs.max_sdu =
3401 le16_to_cpu(fs.max_sdu);
3402 pi->remote_fs.sdu_arr_time =
3403 le32_to_cpu(fs.sdu_arr_time);
3404 pi->remote_fs.acc_latency =
3405 le32_to_cpu(fs.acc_latency);
3406 pi->remote_fs.flush_to =
3407 le32_to_cpu(fs.flush_to);
3408 }
3409 break;
3410
3411 case L2CAP_CONF_EXT_WINDOW:
3412 pi->extended_control = 1;
3413 pi->remote_tx_win = val;
3414 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3415 pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003416 break;
3417
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003418 default:
3419 if (hint)
3420 break;
3421
3422 result = L2CAP_CONF_UNKNOWN;
3423 *((u8 *) ptr++) = type;
3424 break;
3425 }
3426 }
3427
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003428 if (pi->num_conf_rsp || pi->num_conf_req > 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003429 goto done;
3430
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003431 switch (pi->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003432 case L2CAP_MODE_STREAMING:
3433 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003434 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
3435 pi->mode = l2cap_select_mode(rfc.mode,
3436 pi->conn->feat_mask);
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003437 break;
3438 }
3439
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003440 if (pi->mode != rfc.mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003441 return -ECONNREFUSED;
Gustavo F. Padovan742e5192010-06-08 19:09:48 -03003442
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003443 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003444 }
3445
3446done:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003447 if (pi->mode != rfc.mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003448 result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003449 rfc.mode = pi->mode;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003450
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003451 if (pi->num_conf_rsp == 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003452 return -ECONNREFUSED;
3453
3454 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3455 sizeof(rfc), (unsigned long) &rfc);
3456 }
3457
3458
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003459 if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
3460 !(pi->conf_state & L2CAP_CONF_EFS_RECV))
3461 return -ECONNREFUSED;
3462
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003463 if (result == L2CAP_CONF_SUCCESS) {
3464 /* Configure output options and let the other side know
3465 * which ones we don't like. */
3466
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003467 if (mtu < L2CAP_DEFAULT_MIN_MTU) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003468 result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003469 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003470 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003471 else {
3472 pi->omtu = mtu;
3473 pi->conf_state |= L2CAP_CONF_MTU_DONE;
3474 }
3475 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003476
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003477 switch (rfc.mode) {
3478 case L2CAP_MODE_BASIC:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003479 pi->fcs = L2CAP_FCS_NONE;
3480 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003481 break;
3482
3483 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003484 if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
3485 pi->remote_tx_win = rfc.txwin_size;
Mat Martineau86b1b262010-08-05 15:54:22 -07003486
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003487 pi->remote_max_tx = rfc.max_transmit;
Gustavo F. Padovan1c762152010-05-01 16:15:40 -03003488
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003489 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003490
Gustavo F. Padovan10467e92010-05-01 16:15:40 -03003491 rfc.retrans_timeout =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003492 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
Gustavo F. Padovan10467e92010-05-01 16:15:40 -03003493 rfc.monitor_timeout =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003494 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003495
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003496 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003497
3498 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3499 sizeof(rfc), (unsigned long) &rfc);
3500
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003501 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3502 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
3503 sizeof(fs), (unsigned long) &fs);
3504
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003505 break;
3506
3507 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003508 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan1c762152010-05-01 16:15:40 -03003509
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003510 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003511
3512 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3513 sizeof(rfc), (unsigned long) &rfc);
3514
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003515 break;
3516
3517 default:
Marcel Holtmann6464f352007-10-20 13:39:51 +02003518 result = L2CAP_CONF_UNACCEPT;
3519
3520 memset(&rfc, 0, sizeof(rfc));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003521 rfc.mode = pi->mode;
3522 }
3523
3524 if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
3525 !(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
3526 pi->conf_state |= L2CAP_CONF_PEND_SENT;
3527 result = L2CAP_CONF_PENDING;
3528
3529 if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
3530 pi->amp_id) {
Peter Krystadf453bb32011-07-19 17:23:34 -07003531 struct hci_chan *chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003532 /* Trigger logical link creation only on AMP */
3533
Peter Krystadf453bb32011-07-19 17:23:34 -07003534 chan = l2cap_chan_admit(pi->amp_id, pi);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003535 if (!chan)
3536 return -ECONNREFUSED;
3537
3538 chan->l2cap_sk = sk;
3539 if (chan->state == BT_CONNECTED)
3540 l2cap_create_cfm(chan, 0);
3541 }
Marcel Holtmann6464f352007-10-20 13:39:51 +02003542 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003543
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003544 if (result == L2CAP_CONF_SUCCESS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003545 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003546 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003547 rsp->scid = cpu_to_le16(pi->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003548 rsp->result = cpu_to_le16(result);
3549 rsp->flags = cpu_to_le16(0x0000);
3550
3551 return ptr - data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003552}
3553
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003554static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003555{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003556 struct l2cap_pinfo *pi = l2cap_pi(sk);
3557 struct l2cap_conf_rsp *rsp = data;
3558 void *ptr = rsp->data;
3559 void *req = pi->conf_req;
3560 int len = pi->conf_len;
3561 int type, hint, olen;
3562 unsigned long val;
3563 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3564 struct l2cap_conf_ext_fs fs;
3565 u16 mtu = pi->omtu;
3566 u16 tx_win = pi->remote_tx_win;
3567 u16 result = L2CAP_CONF_SUCCESS;
3568
3569 BT_DBG("sk %p", sk);
3570
3571 while (len >= L2CAP_CONF_OPT_SIZE) {
3572 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3573
3574 hint = type & L2CAP_CONF_HINT;
3575 type &= L2CAP_CONF_MASK;
3576
3577 switch (type) {
3578 case L2CAP_CONF_MTU:
3579 mtu = val;
3580 break;
3581
3582 case L2CAP_CONF_FLUSH_TO:
3583 if (pi->amp_move_id)
3584 result = L2CAP_CONF_UNACCEPT;
3585 else
3586 pi->remote_conf.flush_to = val;
3587 break;
3588
3589 case L2CAP_CONF_QOS:
3590 if (pi->amp_move_id)
3591 result = L2CAP_CONF_UNACCEPT;
3592 break;
3593
3594 case L2CAP_CONF_RFC:
3595 if (olen == sizeof(rfc))
3596 memcpy(&rfc, (void *) val, olen);
3597 if (pi->mode != rfc.mode ||
3598 rfc.mode == L2CAP_MODE_BASIC)
3599 result = L2CAP_CONF_UNACCEPT;
3600 break;
3601
3602 case L2CAP_CONF_FCS:
3603 pi->remote_conf.fcs = val;
3604 break;
3605
3606 case L2CAP_CONF_EXT_FS:
3607 if (olen == sizeof(fs)) {
3608 memcpy(&fs, (void *) val, olen);
3609 if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
3610 result = L2CAP_CONF_FLOW_SPEC_REJECT;
3611 else {
3612 pi->remote_conf.flush_to =
3613 le32_to_cpu(fs.flush_to);
3614 }
3615 }
3616 break;
3617
3618 case L2CAP_CONF_EXT_WINDOW:
3619 tx_win = val;
3620 break;
3621
3622 default:
3623 if (hint)
3624 break;
3625
3626 result = L2CAP_CONF_UNKNOWN;
3627 *((u8 *) ptr++) = type;
3628 break;
3629 }
3630 }
3631
3632 BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
3633 result, pi->mode, rfc.mode);
3634
3635 if (result == L2CAP_CONF_SUCCESS) {
3636 /* Configure output options and let the other side know
3637 * which ones we don't like. */
3638
3639 /* Don't allow mtu to decrease. */
3640 if (mtu < pi->omtu)
3641 result = L2CAP_CONF_UNACCEPT;
3642
3643 BT_DBG("mtu %d omtu %d", mtu, pi->omtu);
3644
3645 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
3646
3647 /* Don't allow extended transmit window to change. */
3648 if (tx_win != pi->remote_tx_win) {
3649 result = L2CAP_CONF_UNACCEPT;
3650 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
3651 pi->remote_tx_win);
3652 }
3653
3654 if (rfc.mode == L2CAP_MODE_ERTM) {
3655 pi->remote_conf.retrans_timeout =
3656 le16_to_cpu(rfc.retrans_timeout);
3657 pi->remote_conf.monitor_timeout =
3658 le16_to_cpu(rfc.monitor_timeout);
3659
3660 BT_DBG("remote conf monitor timeout %d",
3661 pi->remote_conf.monitor_timeout);
3662
3663 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3664 sizeof(rfc), (unsigned long) &rfc);
3665 }
3666
3667 }
3668
3669 if (result != L2CAP_CONF_SUCCESS)
3670 goto done;
3671
3672 pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs ;
3673
3674 if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG) {
3675 pi->flush_to = pi->remote_conf.flush_to;
3676 pi->retrans_timeout = pi->remote_conf.retrans_timeout;
3677
3678 if (pi->amp_move_id)
3679 pi->monitor_timeout = pi->remote_conf.monitor_timeout;
3680 else
3681 pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
3682 BT_DBG("mode %d monitor timeout %d",
3683 pi->mode, pi->monitor_timeout);
3684
3685 }
3686
3687done:
3688 rsp->scid = cpu_to_le16(pi->dcid);
3689 rsp->result = cpu_to_le16(result);
3690 rsp->flags = cpu_to_le16(0x0000);
3691
3692 return ptr - data;
3693}
3694
3695static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
3696{
3697 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003698 struct l2cap_conf_req *req = data;
3699 void *ptr = req->data;
3700 int type, olen;
3701 unsigned long val;
3702 struct l2cap_conf_rfc rfc;
3703
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003704 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003705
3706 while (len >= L2CAP_CONF_OPT_SIZE) {
3707 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3708
3709 switch (type) {
3710 case L2CAP_CONF_MTU:
3711 if (val < L2CAP_DEFAULT_MIN_MTU) {
3712 *result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003713 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003714 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003715 pi->imtu = val;
3716 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003717 break;
3718
3719 case L2CAP_CONF_FLUSH_TO:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003720 pi->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003721 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003722 2, pi->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003723 break;
3724
3725 case L2CAP_CONF_RFC:
3726 if (olen == sizeof(rfc))
3727 memcpy(&rfc, (void *)val, olen);
3728
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003729 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
3730 rfc.mode != pi->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003731 return -ECONNREFUSED;
3732
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003733 pi->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003734
3735 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3736 sizeof(rfc), (unsigned long) &rfc);
3737 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003738
3739 case L2CAP_CONF_EXT_WINDOW:
3740 pi->tx_win = val;
3741
3742 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3743 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3744
3745 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
3746 2, pi->tx_win);
3747 break;
3748
3749 default:
3750 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003751 }
3752 }
3753
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003754 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003755 return -ECONNREFUSED;
3756
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003757 pi->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003758
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003759 if (*result == L2CAP_CONF_SUCCESS) {
3760 switch (rfc.mode) {
3761 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003762 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3763 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3764 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003765 break;
3766 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003767 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003768 }
3769 }
3770
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003771 req->dcid = cpu_to_le16(pi->dcid);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003772 req->flags = cpu_to_le16(0x0000);
3773
3774 return ptr - data;
3775}
3776
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003777static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778{
3779 struct l2cap_conf_rsp *rsp = data;
3780 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003781
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003782 BT_DBG("sk %p", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003783
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003784 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003785 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003786 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003787
3788 return ptr - data;
3789}
3790
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003791static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003792{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003793 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003794 int type, olen;
3795 unsigned long val;
3796 struct l2cap_conf_rfc rfc;
3797
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003798 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003799
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003800 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003801 return;
3802
3803 while (len >= L2CAP_CONF_OPT_SIZE) {
3804 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3805
3806 switch (type) {
3807 case L2CAP_CONF_RFC:
3808 if (olen == sizeof(rfc))
3809 memcpy(&rfc, (void *)val, olen);
3810 goto done;
3811 }
3812 }
3813
3814done:
3815 switch (rfc.mode) {
3816 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003817 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3818 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3819 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003820 break;
3821 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003822 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003823 }
3824}
3825
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003826static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
3827{
3828 struct l2cap_pinfo *pi = l2cap_pi(sk);
3829 int type, olen;
3830 unsigned long val;
3831 struct l2cap_conf_ext_fs fs;
3832
3833 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
3834
3835 while (len >= L2CAP_CONF_OPT_SIZE) {
3836 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3837 if ((type == L2CAP_CONF_EXT_FS) &&
3838 (olen == sizeof(struct l2cap_conf_ext_fs))) {
3839 memcpy(&fs, (void *)val, olen);
3840 pi->local_fs.id = fs.id;
3841 pi->local_fs.type = fs.type;
3842 pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
3843 pi->local_fs.sdu_arr_time =
3844 le32_to_cpu(fs.sdu_arr_time);
3845 pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
3846 pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
3847 break;
3848 }
3849 }
3850
3851}
3852
3853static int l2cap_finish_amp_move(struct sock *sk)
3854{
3855 struct l2cap_pinfo *pi;
3856 int err;
3857
3858 BT_DBG("sk %p", sk);
3859
3860 pi = l2cap_pi(sk);
3861
3862 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
3863 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
3864
3865 if (pi->ampcon)
3866 pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
3867 else
3868 pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
3869
3870 err = l2cap_setup_resegment(sk);
3871
3872 return err;
3873}
3874
/* Handle the peer's Configuration Response to a channel-move
 * reconfiguration.  On success the RFC option (if present) is validated
 * against the channel mode; then, regardless of outcome, all ERTM timers
 * are stopped and the move completes according to pi->reconf_state:
 * the acceptor answers the pending poll, the initiator sends an explicit
 * poll and waits for the F-bit.
 *
 * Returns 0 on success or a negative errno (e.g. -ECONNREFUSED when no
 * reconfiguration was pending or the mode is incompatible).
 */
static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
					u16 result)
{
	int err = 0;
	struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);

	/* A response is only meaningful while a reconfiguration is open. */
	if (pi->reconf_state == L2CAP_RECONF_NONE)
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		while (len >= L2CAP_CONF_OPT_SIZE) {
			int type, olen;
			unsigned long val;

			len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

			if (type == L2CAP_CONF_RFC) {
				if (olen == sizeof(rfc))
					memcpy(&rfc, (void *)val, olen);
				/* The remote may only keep the current
				 * mode or request ERTM. */
				if (rfc.mode != pi->mode &&
					rfc.mode != L2CAP_MODE_ERTM) {
					err = -ECONNREFUSED;
					goto done;
				}
				break;
			}
		}
	}

done:
	/* Quiesce ERTM before switching controllers. */
	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);

	if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;

		/* Respond to poll */
		err = l2cap_answer_move_poll(sk);

	} else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {

		/* If moving to BR/EDR, use default timeout defined by
		 * the spec */
		if (pi->amp_move_id == 0)
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

		if (pi->mode == L2CAP_MODE_ERTM) {
			/* Poll the remote and wait for the final F-bit. */
			l2cap_ertm_tx(sk, NULL, NULL,
					L2CAP_ERTM_EVENT_EXPLICIT_POLL);
			pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
		}
	}

	return err;
}
3934
3935
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003936static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3937{
3938 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
3939
3940 if (rej->reason != 0x0000)
3941 return 0;
3942
3943 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3944 cmd->ident == conn->info_ident) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003945 del_timer(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003946
3947 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003948 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003949
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003950 l2cap_conn_start(conn);
3951 }
3952
3953 return 0;
3954}
3955
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003956static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
3957 struct l2cap_cmd_hdr *cmd,
3958 u8 *data, u8 rsp_code,
3959 u8 amp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003960{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003961 struct l2cap_chan_list *list = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003962 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3963 struct l2cap_conn_rsp rsp;
Nathan Holsteind793fe82010-10-15 11:54:02 -04003964 struct sock *parent, *sk = NULL;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003965 int result, status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003966
3967 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003968 __le16 psm = req->psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003969
3970 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
3971
3972 /* Check if we have socket listening on psm */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003973 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
3974 if (!parent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003975 result = L2CAP_CR_BAD_PSM;
3976 goto sendresp;
3977 }
3978
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00003979 bh_lock_sock(parent);
3980
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003981 /* Check if the ACL is secure enough (if not SDP) */
3982 if (psm != cpu_to_le16(0x0001) &&
3983 !hci_conn_check_link_mode(conn->hcon)) {
Marcel Holtmann2950f212009-02-12 14:02:50 +01003984 conn->disc_reason = 0x05;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003985 result = L2CAP_CR_SEC_BLOCK;
3986 goto response;
3987 }
3988
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989 result = L2CAP_CR_NO_MEM;
3990
3991 /* Check for backlog size */
3992 if (sk_acceptq_is_full(parent)) {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003993 BT_DBG("backlog full %d", parent->sk_ack_backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003994 goto response;
3995 }
3996
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003997 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
3998 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003999 goto response;
4000
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004001 write_lock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004002
4003 /* Check if we already have channel with that dcid */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004004 if (__l2cap_get_chan_by_dcid(list, scid)) {
4005 write_unlock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004006 sock_set_flag(sk, SOCK_ZAPPED);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004007 l2cap_sock_kill(sk);
4008 sk = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009 goto response;
4010 }
4011
4012 hci_conn_hold(conn->hcon);
4013
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004014 l2cap_sock_init(sk, parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004015 bacpy(&bt_sk(sk)->src, conn->src);
4016 bacpy(&bt_sk(sk)->dst, conn->dst);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004017 l2cap_pi(sk)->psm = psm;
4018 l2cap_pi(sk)->dcid = scid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019
Gustavo F. Padovand1010242011-03-25 00:39:48 -03004020 bt_accept_enqueue(parent, sk);
4021
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004022 __l2cap_chan_add(conn, sk);
4023 dcid = l2cap_pi(sk)->scid;
4024 l2cap_pi(sk)->amp_id = amp_id;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004025
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004026 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004027
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004028 l2cap_pi(sk)->ident = cmd->ident;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004029
Marcel Holtmann984947d2009-02-06 23:35:19 +01004030 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004031 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004032 if (bt_sk(sk)->defer_setup) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004033 sk->sk_state = BT_CONNECT2;
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004034 result = L2CAP_CR_PEND;
4035 status = L2CAP_CS_AUTHOR_PEND;
4036 parent->sk_data_ready(parent, 0);
4037 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004038 /* Force pending result for AMP controllers.
4039 * The connection will succeed after the
4040 * physical link is up. */
4041 if (amp_id) {
4042 sk->sk_state = BT_CONNECT2;
4043 result = L2CAP_CR_PEND;
4044 } else {
4045 sk->sk_state = BT_CONFIG;
4046 result = L2CAP_CR_SUCCESS;
4047 }
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004048 status = L2CAP_CS_NO_INFO;
4049 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004050 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004051 sk->sk_state = BT_CONNECT2;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004052 result = L2CAP_CR_PEND;
4053 status = L2CAP_CS_AUTHEN_PEND;
4054 }
4055 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004056 sk->sk_state = BT_CONNECT2;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004057 result = L2CAP_CR_PEND;
4058 status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004059 }
4060
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004061 write_unlock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004062
4063response:
4064 bh_unlock_sock(parent);
4065
4066sendresp:
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004067 rsp.scid = cpu_to_le16(scid);
4068 rsp.dcid = cpu_to_le16(dcid);
4069 rsp.result = cpu_to_le16(result);
4070 rsp.status = cpu_to_le16(status);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004071 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004072
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004073 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004074 struct l2cap_info_req info;
4075 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4076
4077 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4078 conn->info_ident = l2cap_get_ident(conn);
4079
4080 mod_timer(&conn->info_timer, jiffies +
4081 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
4082
4083 l2cap_send_cmd(conn, conn->info_ident,
4084 L2CAP_INFO_REQ, sizeof(info), &info);
4085 }
4086
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004087 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004088 result == L2CAP_CR_SUCCESS) {
4089 u8 buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004090 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004091 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004092 l2cap_build_conf_req(sk, buf), buf);
4093 l2cap_pi(sk)->num_conf_req++;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004094 }
4095
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004096 return sk;
4097}
4098
4099static inline int l2cap_connect_req(struct l2cap_conn *conn,
4100 struct l2cap_cmd_hdr *cmd, u8 *data)
4101{
4102 l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004103 return 0;
4104}
4105
/* Handle a Connection Response from the remote.  Looks up the channel by
 * our source CID (or by the request ident while no CID was assigned) and
 * advances it: success moves the channel to BT_CONFIG and starts
 * configuration, pending just records the state, anything else tears the
 * channel down.
 *
 * Returns 0 on success or -EFAULT when no matching channel exists.
 * The lookup helpers lock the socket; it is unlocked before returning 0.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		/* No CID echoed back: match on the command ident. */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Configuration already in flight; nothing more to send. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			/* Defer the teardown briefly via the sock timer. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
4167
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004168static inline void set_default_fcs(struct l2cap_pinfo *pi)
Mat Martineau8c462b62010-08-24 15:35:42 -07004169{
4170 /* FCS is enabled only in ERTM or streaming mode, if one or both
4171 * sides request it.
4172 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004173 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
4174 pi->fcs = L2CAP_FCS_NONE;
4175 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
4176 pi->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07004177}
4178
Al Viro88219a02007-07-29 00:17:25 -07004179static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004180{
4181 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
4182 u16 dcid, flags;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004183 u8 rspbuf[64];
4184 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004185 struct sock *sk;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004186 int len;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004187 u8 amp_move_reconf = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004188
4189 dcid = __le16_to_cpu(req->dcid);
4190 flags = __le16_to_cpu(req->flags);
4191
4192 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
4193
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004194 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
4195 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004196 return -ENOENT;
4197
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004198 BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
4199 "reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
4200 sk->sk_state, l2cap_pi(sk)->rx_state,
4201 l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
4202 l2cap_pi(sk)->amp_move_id);
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004203
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004204 /* Detect a reconfig request due to channel move between
4205 * BR/EDR and AMP
4206 */
4207 if (sk->sk_state == BT_CONNECTED &&
4208 l2cap_pi(sk)->rx_state ==
4209 L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
4210 l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;
4211
4212 if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
4213 amp_move_reconf = 1;
4214
4215 if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
Gustavo F. Padovandf6bd742010-06-14 02:26:15 -03004216 struct l2cap_cmd_rej rej;
4217
4218 rej.reason = cpu_to_le16(0x0002);
4219 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
4220 sizeof(rej), &rej);
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004221 goto unlock;
Gustavo F. Padovandf6bd742010-06-14 02:26:15 -03004222 }
Marcel Holtmann354f60a2006-11-18 22:15:20 +01004223
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004224 /* Reject if config buffer is too small. */
Al Viro88219a02007-07-29 00:17:25 -07004225 len = cmd_len - sizeof(*req);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004226 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004227 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004228 l2cap_build_conf_rsp(sk, rspbuf,
4229 L2CAP_CONF_REJECT, flags), rspbuf);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004230 goto unlock;
4231 }
4232
4233 /* Store config. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004234 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
4235 l2cap_pi(sk)->conf_len += len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004236
4237 if (flags & 0x0001) {
4238 /* Incomplete config. Send empty response. */
4239 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004240 l2cap_build_conf_rsp(sk, rspbuf,
4241 L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004242 goto unlock;
4243 }
4244
4245 /* Complete config. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004246 if (!amp_move_reconf)
4247 len = l2cap_parse_conf_req(sk, rspbuf);
4248 else
4249 len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);
4250
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004251 if (len < 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004252 l2cap_send_disconn_req(conn, sk, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004253 goto unlock;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004254 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004255
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004256 l2cap_pi(sk)->conf_ident = cmd->ident;
4257 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
4258
4259 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
4260 rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
4261 !l2cap_pi(sk)->amp_id) {
4262 /* Send success response right after pending if using
4263 * lockstep config on BR/EDR
4264 */
4265 rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
4266 l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
4267 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
4268 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004269
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004270 /* Reset config buffer. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004271 l2cap_pi(sk)->conf_len = 0;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02004272
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004273 if (amp_move_reconf)
Marcel Holtmann876d9482007-10-20 13:35:42 +02004274 goto unlock;
4275
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004276 l2cap_pi(sk)->num_conf_rsp++;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004277
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004278 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
4279 goto unlock;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004280
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004281 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
4282 set_default_fcs(l2cap_pi(sk));
4283
4284 sk->sk_state = BT_CONNECTED;
4285
4286 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
4287 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
4288 l2cap_ertm_init(sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004289
Linus Torvalds1da177e2005-04-16 15:20:36 -07004290 l2cap_chan_ready(sk);
Marcel Holtmann876d9482007-10-20 13:35:42 +02004291 goto unlock;
4292 }
4293
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004294 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004295 u8 buf[64];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004296 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004297 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004298 l2cap_build_conf_req(sk, buf), buf);
4299 l2cap_pi(sk)->num_conf_req++;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004300 }
4301
4302unlock:
4303 bh_unlock_sock(sk);
4304 return 0;
4305}
4306
/* Handle an incoming L2CAP Configure Response.
 *
 * Dispatches on the response result: SUCCESS completes our side of the
 * negotiation; PENDING is only legal under the lockstep procedure (used
 * with AMP) and may trigger logical-link setup; UNACCEPT retries with a
 * revised request up to L2CAP_CONF_MAX_CONF_RSP times; anything else
 * tears the channel down.  A response arriving during an AMP move
 * reconfiguration is diverted to l2cap_amp_move_reconf_rsp().
 *
 * Always returns 0; the channel lock from l2cap_get_chan_by_scid() is
 * released at "done".
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	int len = cmd->len - sizeof(*rsp);

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	pi = l2cap_pi(sk);

	/* AMP move reconfiguration responses bypass the state machine. */
	if (pi->reconf_state != L2CAP_RECONF_NONE) {
		l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
		goto done;
	}

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
			/* Lockstep procedure requires a pending response
			 * before success.
			 */
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_PENDING:
		/* PENDING is only valid when lockstep config was agreed. */
		if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);

		pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;

		l2cap_conf_ext_fs_get(sk, rsp->data, len);

		if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
			struct hci_chan *chan;

			/* Already sent a 'pending' response, so set up
			 * the logical link now
			 */
			chan = l2cap_chan_admit(pi->amp_id, pi);
			if (!chan) {
				l2cap_send_disconn_req(pi->conn, sk,
							ECONNRESET);
				goto done;
			}

			chan->l2cap_sk = sk;
			if (chan->state == BT_CONNECTED)
				l2cap_create_cfm(chan, 0);
		}

		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			pi->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* Retry budget exhausted: deliberate fall-through to the
		 * default (disconnect) case.
		 */

	default:
		sk->sk_err = ECONNRESET;
		/* Give the disconnect exchange 5s before the timer fires. */
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto done;
	}

	/* Continuation flag set: more response fragments are coming. */
	if (flags & 0x01)
		goto done;

	pi->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(pi);

		sk->sk_state = BT_CONNECTED;

		if (pi->mode == L2CAP_MODE_ERTM ||
			pi->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
4433
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges with a Disconnection Response, purges pending transmit
 * state (and ERTM timers/work items) unless a local disconnect is
 * already in flight, then deletes the channel.  If the socket is locked
 * by a user-space caller, deletion is deferred via a short timer
 * instead (HZ / 5 = 200ms retry).
 *
 * Always returns 0; silently ignores unknown CIDs.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo our CID pair back; note dcid/scid swap perspective. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Only do cleanup if a disconnect request was not sent already */
	if (sk->sk_state != BT_DISCONN) {
		sk->sk_shutdown = SHUTDOWN_MASK;

		skb_queue_purge(TX_QUEUE(sk));

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
			skb_queue_purge(SREJ_QUEUE(sk));

			/* Stop ERTM housekeeping before teardown. */
			__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
			__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
			__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
		}
	}

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
4485
4486static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4487{
4488 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4489 u16 dcid, scid;
4490 struct sock *sk;
4491
4492 scid = __le16_to_cpu(rsp->scid);
4493 dcid = __le16_to_cpu(rsp->dcid);
4494
4495 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4496
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004497 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
4498 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004499 return 0;
4500
Andrei Emeltchenkoa49184c2010-11-03 12:32:44 +02004501 /* don't delete l2cap channel if sk is owned by user */
4502 if (sock_owned_by_user(sk)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004503 sk->sk_state = BT_DISCONN;
4504 l2cap_sock_clear_timer(sk);
4505 l2cap_sock_set_timer(sk, HZ / 5);
Andrei Emeltchenkoa49184c2010-11-03 12:32:44 +02004506 bh_unlock_sock(sk);
4507 return 0;
4508 }
4509
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004510 l2cap_chan_del(sk, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004511 bh_unlock_sock(sk);
4512
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004513 l2cap_sock_kill(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004514 return 0;
4515}
4516
4517static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4518{
4519 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004520 u16 type;
4521
4522 type = __le16_to_cpu(req->type);
4523
4524 BT_DBG("type 0x%4.4x", type);
4525
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004526 if (type == L2CAP_IT_FEAT_MASK) {
4527 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004528 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004529 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4530 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4531 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004532 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004533 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004534 | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004535 put_unaligned_le32(feat_mask, rsp->data);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004536 l2cap_send_cmd(conn, cmd->ident,
4537 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004538 } else if (type == L2CAP_IT_FIXED_CHAN) {
4539 u8 buf[12];
4540 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4541 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4542 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4543 memcpy(buf + 4, l2cap_fixed_chan, 8);
4544 l2cap_send_cmd(conn, cmd->ident,
4545 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004546 } else {
4547 struct l2cap_info_rsp rsp;
4548 rsp.type = cpu_to_le16(type);
4549 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4550 l2cap_send_cmd(conn, cmd->ident,
4551 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
4552 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004553
4554 return 0;
4555}
4556
/* Handle an incoming L2CAP Information Response.
 *
 * Validates that the response matches our outstanding request ident,
 * stops the info timer, caches the peer's feature mask / fixed-channel
 * mask on the connection, chains a fixed-channels query after a
 * feature-mask reply when supported, and finally kicks pending channel
 * setup via l2cap_conn_start().  Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	/* Peer refused or failed the query: mark done and proceed anyway. */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If fixed channels are supported, chain a second query
		 * before declaring the info exchange done.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* Only the first octet of the fixed-channel map is kept. */
		conn->fc_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4610
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004611static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
4612 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
4613{
4614 struct l2cap_move_chan_req req;
4615 u8 ident;
4616
4617 BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
4618 (int) dest_amp_id);
4619
4620 ident = l2cap_get_ident(conn);
4621 if (pi)
4622 pi->ident = ident;
4623
4624 req.icid = cpu_to_le16(icid);
4625 req.dest_amp_id = dest_amp_id;
4626
4627 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
4628}
4629
4630static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4631 u16 icid, u16 result)
4632{
4633 struct l2cap_move_chan_rsp rsp;
4634
4635 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4636
4637 rsp.icid = cpu_to_le16(icid);
4638 rsp.result = cpu_to_le16(result);
4639
4640 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4641}
4642
4643static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4644 struct l2cap_pinfo *pi, u16 icid, u16 result)
4645{
4646 struct l2cap_move_chan_cfm cfm;
4647 u8 ident;
4648
4649 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4650
4651 ident = l2cap_get_ident(conn);
4652 if (pi)
4653 pi->ident = ident;
4654
4655 cfm.icid = cpu_to_le16(icid);
4656 cfm.result = cpu_to_le16(result);
4657
4658 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4659}
4660
4661static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4662 u16 icid)
4663{
4664 struct l2cap_move_chan_cfm_rsp rsp;
4665
4666 BT_DBG("icid %d", (int) icid);
4667
4668 rsp.icid = cpu_to_le16(icid);
4669 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4670}
4671
4672static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4673 struct l2cap_cmd_hdr *cmd, u8 *data)
4674{
4675 struct l2cap_create_chan_req *req =
4676 (struct l2cap_create_chan_req *) data;
4677 struct sock *sk;
4678 u16 psm, scid;
4679
4680 psm = le16_to_cpu(req->psm);
4681 scid = le16_to_cpu(req->scid);
4682
4683 BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
4684 (int) req->amp_id);
4685
4686 if (req->amp_id) {
4687 struct hci_dev *hdev;
4688
4689 /* Validate AMP controller id */
4690 hdev = hci_dev_get(A2MP_HCI_ID(req->amp_id));
4691 if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
4692 struct l2cap_create_chan_rsp rsp;
4693
4694 rsp.dcid = 0;
4695 rsp.scid = cpu_to_le16(scid);
4696 rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
4697 rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;
4698
4699 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4700 sizeof(rsp), &rsp);
4701
4702 if (hdev)
4703 hci_dev_put(hdev);
4704
4705 return 0;
4706 }
4707
4708 hci_dev_put(hdev);
4709 }
4710
4711 sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4712 req->amp_id);
4713
4714 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
4715
4716 if (sk && req->amp_id)
4717 amp_accept_physical(conn, req->amp_id, sk);
4718
4719 return 0;
4720}
4721
/* Handle an incoming AMP Create Channel Response.
 *
 * The response payload has the same layout and semantics as a Connect
 * Response, so delegate to the ordinary connect-response handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4729
/* Handle an incoming AMP Move Channel Request (we are the responder).
 *
 * Refuses the move when: the channel is unknown, is not a dynamic
 * ERTM/streaming channel, is already on the requested controller
 * (SAME_ID), targets an unavailable AMP controller (CONTROLLER), a
 * competing locally-initiated move wins the collision tie-break, or
 * local policy pins the channel to BR/EDR.  Otherwise records the move
 * state and answers SUCCESS or PENDING.  Always returns 0; a Move
 * Channel Response is sent in every path.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid = 0;
	u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		goto send_move_response;

	lock_sock(sk);
	pi = l2cap_pi(sk);

	/* Only dynamic channels in ERTM or streaming mode are movable. */
	if (pi->scid < L2CAP_CID_DYN_START ||
		(pi->mode != L2CAP_MODE_ERTM &&
		 pi->mode != L2CAP_MODE_STREAMING)) {
		goto send_move_response;
	}

	if (pi->amp_id == req->dest_amp_id) {
		result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist and be powered up. */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(A2MP_HCI_ID(req->dest_amp_id));
		if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
			goto send_move_response;
		}
	}

	/* Collision: a move is already in progress here.  The tie is
	 * broken by BD_ADDR comparison; the higher address refuses.
	 */
	if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
		pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
		bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
		goto send_move_response;
	}

	/* Local AMP policy forbids leaving BR/EDR. */
	if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
		result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
		goto send_move_response;
	}

	/* Accept the move: become responder and stash the move target. */
	pi->amp_move_cmd_ident = cmd->ident;
	pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
	l2cap_amp_move_setup(sk);
	pi->amp_move_id = req->dest_amp_id;
	icid = pi->dcid;

	if (req->dest_amp_id == 0) {
		/* Moving to BR/EDR */
		if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			result = L2CAP_MOVE_CHAN_PENDING;
		} else {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
			result = L2CAP_MOVE_CHAN_SUCCESS;
		}
	} else {
		/* Moving to AMP: physical link must be accepted first. */
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
		amp_accept_physical(pi->conn, req->dest_amp_id, sk);
		result = L2CAP_MOVE_CHAN_PENDING;
	}

send_move_response:
	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);

	if (sk)
		release_sock(sk);

	return 0;
}
4818
/* Handle an incoming AMP Move Channel Response (we are the initiator).
 *
 * On SUCCESS/PENDING the channel is looked up by icid (our scid); the
 * move proceeds according to the current amp_move_state — either the
 * confirm is sent immediately, deferred until the logical link on the
 * destination AMP comes up, or the logical link is admitted here.  Any
 * other result is a failure: a collision refusal flips us to responder,
 * everything else reverts the move; in both failure paths an
 * UNCONFIRMED confirm is sent.  Always returns 0.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid, result;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	switch (result) {
	case L2CAP_MOVE_CHAN_SUCCESS:
	case L2CAP_MOVE_CHAN_PENDING:
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Unknown channel: tell the peer the move failed. */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);
		if (result == L2CAP_MOVE_CHAN_PENDING)
			l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);

		if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent when logical link
			 * is complete.
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
		} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
				pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			} else {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			}
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
			/* Default flow spec used until reconfiguration. */
			struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			struct hci_chan *chan;
			/* Moving to AMP */
			if (result == L2CAP_MOVE_CHAN_SUCCESS) {
				/* Remote is ready, send confirm immediately
				 * after logical link is ready
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			} else {
				/* Both logical link and move success
				 * are required to confirm
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
			}
			pi->remote_fs = default_fs;
			pi->local_fs = default_fs;
			chan = l2cap_chan_admit(pi->amp_move_id, pi);
			if (!chan) {
				/* Logical link not available */
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
				break;
			}
			if (chan->state == BT_CONNECTED) {
				/* Logical link is already ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				if (result == L2CAP_MOVE_CHAN_SUCCESS) {
					/* Can confirm now */
					l2cap_send_move_chan_cfm(conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				} else {
					/* Now only need move success
					 * required to confirm
					 */
					pi->amp_move_state =
						L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
				}
			} else
				chan->l2cap_sk = sk;
		} else {
			/* Any other amp move state means the move failed. */
			l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}
		break;
	default:
		/* Failed (including collision case) */
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Could not locate channel, icid is best guess */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);

		if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
				pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
			else {
				/* Cleanup - cancel move */
				pi->amp_move_id = pi->amp_id;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
				l2cap_amp_move_revert(sk);
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
			}
		} else {
			/* State is STABLE so the confirm response is
			 * ignored.
			 */
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
		}

		l2cap_send_move_chan_cfm(conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		break;
	}

	if (sk)
		release_sock(sk);

	return 0;
}
4974
/* Handle an incoming AMP Move Channel Confirm (we are the responder).
 *
 * If we are waiting for this confirm, a CONFIRMED result commits the
 * move (adopting amp_move_id, releasing any AMP hci_chan when the
 * channel has moved back to BR/EDR); any other result reverts it.  A
 * Confirm Response is always sent back, even for unknown channels.
 * Always returns 0.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
	struct sock *sk;
	u16 icid, result;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk) {
		BT_DBG("Bad channel (%d)", (int) icid);
		goto send_move_confirm_response;
	}

	lock_sock(sk);

	if (l2cap_pi(sk)->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
			l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;
			if ((!l2cap_pi(sk)->amp_id) &&
					(l2cap_pi(sk)->ampchan)) {
				/* Have moved off of AMP, free the channel */
				/* NOTE(review): ampchan->refcnt is read after
				 * hci_chan_put(); the sibling handler
				 * (l2cap_move_channel_confirm_rsp) does the
				 * same — presumably safe because a reference
				 * is still held elsewhere until deaggregate,
				 * but worth confirming against hci_chan_put()
				 * semantics.
				 */
				hci_chan_put(l2cap_pi(sk)->ampchan);
				if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
								l2cap_pi(sk));
				l2cap_pi(sk)->ampchan = NULL;
				l2cap_pi(sk)->ampcon = NULL;
			}
			l2cap_amp_move_success(sk);
		} else {
			/* Peer did not confirm: roll the move back. */
			l2cap_pi(sk)->amp_move_id = l2cap_pi(sk)->amp_id;
			l2cap_amp_move_revert(sk);
		}
		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	} else if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
		/* Unexpected while still waiting on the logical link. */
		BT_DBG("Bad AMP_MOVE_STATE (%d)", l2cap_pi(sk)->amp_move_state);
	}

send_move_confirm_response:
	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	if (sk)
		release_sock(sk);

	return 0;
}
5031
/* Handle an incoming AMP Move Channel Confirm Response (we are the
 * initiator).
 *
 * Completes our side of the move: adopts amp_move_id as the channel's
 * controller, releases the AMP hci_chan if the channel returned to
 * BR/EDR, and signals move success.  Responses arriving in any other
 * move state are ignored.  Always returns 0.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp =
		(struct l2cap_move_chan_cfm_rsp *) data;
	struct sock *sk;
	u16 icid;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", (int) icid);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		return 0;

	lock_sock(sk);

	l2cap_sock_clear_timer(sk);

	if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;

		if (!l2cap_pi(sk)->amp_id) {
			/* Have moved off of AMP, free the channel */
			l2cap_pi(sk)->ampcon = NULL;
			/* NOTE(review): refcnt is read after hci_chan_put();
			 * assumed safe while another reference persists until
			 * l2cap_deaggregate() — confirm against hci_chan_put()
			 * semantics.
			 */
			if (l2cap_pi(sk)->ampchan) {
				hci_chan_put(l2cap_pi(sk)->ampchan);
				if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
								l2cap_pi(sk));
			}
			l2cap_pi(sk)->ampchan = NULL;
		}

		l2cap_amp_move_success(sk);

		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	}

	release_sock(sk);

	return 0;
}
5081
5082static void l2cap_amp_signal_worker(struct work_struct *work)
5083{
5084 int err = 0;
5085 struct l2cap_amp_signal_work *ampwork =
5086 container_of(work, struct l2cap_amp_signal_work, work);
5087
5088 switch (ampwork->cmd.code) {
5089 case L2CAP_MOVE_CHAN_REQ:
5090 err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
5091 ampwork->data);
5092 break;
5093
5094 case L2CAP_MOVE_CHAN_RSP:
5095 err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
5096 ampwork->data);
5097 break;
5098
5099 case L2CAP_MOVE_CHAN_CFM:
5100 err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
5101 ampwork->data);
5102 break;
5103
5104 case L2CAP_MOVE_CHAN_CFM_RSP:
5105 err = l2cap_move_channel_confirm_rsp(ampwork->conn,
5106 &ampwork->cmd, ampwork->data);
5107 break;
5108
5109 default:
5110 BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
5111 err = -EINVAL;
5112 break;
5113 }
5114
5115 if (err) {
5116 struct l2cap_cmd_rej rej;
5117 BT_DBG("error %d", err);
5118
5119 /* In this context, commands are only rejected with
5120 * "command not understood", code 0.
5121 */
5122 rej.reason = cpu_to_le16(0);
5123 l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
5124 L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5125 }
5126
5127 kfree_skb(ampwork->skb);
5128 kfree(ampwork);
5129}
5130
/* Completion callback for an AMP physical link create/accept.
 *
 * @result:    L2CAP_CREATE_CHAN_SUCCESS / L2CAP_MOVE_CHAN_SUCCESS or an
 *             error (-EINVAL indicates a controller problem).
 * @local_id:  local AMP controller id for the new physical link.
 * @remote_id: remote AMP controller id.
 * @sk:        the L2CAP channel socket affected.
 *
 * For a channel not yet connected this finishes channel creation on the
 * AMP (incoming: send CREATE_CHAN_RSP and start configuration;
 * outgoing: send CREATE_CHAN_REQ, or fall back to a BR/EDR connect).
 * For a connected channel it continues the channel-move state machine
 * as initiator or responder; on failure the move is refused/aborted and
 * normal data flow is resumed.
 */
void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
		struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
		(int) local_id, (int) remote_id, sk);

	lock_sock(sk);

	/* Channel is going away; nothing to do */
	if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
		release_sock(sk);
		return;
	}

	pi = l2cap_pi(sk);

	if (sk->sk_state != BT_CONNECTED) {
		if (bt_sk(sk)->parent) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			/* Incoming channel on AMP */
			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Send successful response */
				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			} else {
				/* Send negative response */
				rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			}

			l2cap_send_cmd(pi->conn, pi->ident,
					L2CAP_CREATE_CHAN_RSP,
					sizeof(rsp), &rsp);

			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Start configuration immediately */
				sk->sk_state = BT_CONFIG;
				pi->conf_state |= L2CAP_CONF_REQ_SENT;
				l2cap_send_cmd(pi->conn,
					l2cap_get_ident(pi->conn),
					L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
				l2cap_pi(sk)->num_conf_req++;
			}
		} else {
			/* Outgoing channel on AMP */
			if (result != L2CAP_CREATE_CHAN_SUCCESS) {
				/* Revert to BR/EDR connect */
				l2cap_send_conn_req(sk);
			} else {
				pi->amp_id = local_id;
				l2cap_send_create_chan_req(sk, remote_id);
			}
		}
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		/* We initiated the move: ask the peer to move the channel */
		l2cap_amp_move_setup(sk);
		pi->amp_move_id = local_id;
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;

		l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		/* Peer-initiated move: admit a logical channel on the AMP */
		struct hci_chan *chan;
		struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
				0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
		pi->remote_fs = default_fs;
		pi->local_fs = default_fs;
		chan = l2cap_chan_admit(local_id, pi);
		if (chan) {
			if (chan->state == BT_CONNECTED) {
				/* Logical link is ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			} else {
				/* Wait for logical link to be ready */
				chan->l2cap_sk = sk;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			}
		} else {
			/* Logical link not available */
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}
	} else {
		/* Physical link failed (or unexpected role): abort the move */
		BT_DBG("result %d, role %d, local_busy %d", result,
			(int) pi->amp_move_role,
			(int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));

		if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			if (result == -EINVAL)
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
			else
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}

		pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
		pi->amp_move_state = L2CAP_AMP_STATE_STABLE;

		/* Clear local busy if receive buffer space has opened up */
		if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
			l2cap_ertm_tx(sk, 0, 0,
					L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

		/* Restart data transmission */
		l2cap_ertm_send(sk);
	}

	release_sock(sk);
}
5258
/* Handle completion of an AMP logical link (hci_chan) setup for the
 * channel socket recorded in chan->l2cap_sk.
 *
 * On success (status == 0) the channel is bound to the logical link
 * and the state machine advances: finish configuration for a channel
 * being created on the AMP, or send the pending move confirm/response
 * for a channel being moved.  On failure the move is reverted or the
 * channel is disconnected.
 *
 * Runs in workqueue context (see l2cap_logical_link_worker).
 * Always returns 0.
 */
int l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
{
	struct l2cap_pinfo *pi;
	struct sock *sk;

	BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);

	sk = chan->l2cap_sk;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	/* Only relevant while connected or while joining an AMP */
	if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
		release_sock(sk);
		return 0;
	}

	pi = l2cap_pi(sk);

	if ((!status) && (chan != NULL)) {
		pi->ampchan = chan;
		pi->ampcon = chan->conn;
		pi->ampcon->l2cap_data = pi->conn;

		if (sk->sk_state != BT_CONNECTED) {
			/* Channel creation on AMP: complete configuration */
			struct l2cap_conf_rsp rsp;

			/* Must use spinlock to prevent concurrent
			 * execution of l2cap_config_rsp()
			 */
			bh_lock_sock(sk);
			l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(sk, &rsp,
						L2CAP_CONF_SUCCESS, 0), &rsp);
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;

			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
				set_default_fcs(l2cap_pi(sk));

				sk->sk_state = BT_CONNECTED;

				if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
					l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
					l2cap_ertm_init(sk);

				l2cap_chan_ready(sk);
			}
			bh_unlock_sock(sk);
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent after a success
			 * response is received
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_SUCCESS);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			}
		} else {
			/* Move was not in expected state, free the
			 * logical link
			 */
			hci_chan_put(pi->ampchan);
			pi->ampcon = NULL;
			pi->ampchan = NULL;
		}
	} else {
		/* Logical link setup failed. */

		if (sk->sk_state != BT_CONNECTED)
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			l2cap_amp_move_revert(sk);
			l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONFIG);
		} else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if ((pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
				(pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
				/* Remote has only sent pending or
				 * success responses, clean up
				 */
				l2cap_amp_move_revert(sk);
				l2cap_pi(sk)->amp_move_role =
					L2CAP_AMP_MOVE_NONE;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			}

			/* Other amp move states imply that the move
			 * has already aborted
			 */
			l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}

		pi->ampcon = NULL;
		pi->ampchan = NULL;
	}

	release_sock(sk);
	return 0;
}
5384
5385static void l2cap_logical_link_worker(struct work_struct *work)
5386{
5387 struct l2cap_logical_link_work *log_link_work =
5388 container_of(work, struct l2cap_logical_link_work, work);
5389
5390 l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
5391 kfree(log_link_work);
5392}
5393
5394static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
5395{
5396 struct l2cap_logical_link_work *amp_work;
5397
5398 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5399 if (!amp_work)
5400 return -ENOMEM;
5401
5402 INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
5403 amp_work->chan = chan;
5404 amp_work->status = status;
5405 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5406 kfree(amp_work);
5407 return -ENOMEM;
5408 }
5409
5410 return 0;
5411}
5412
/* HCI callback: a logical-link modify (flow spec change) completed.
 * Currently a stub that only logs the event; no state is updated.
 * Always returns 0.
 */
int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
{
	struct l2cap_conn *conn = chan->conn->l2cap_data;

	BT_DBG("chan %p conn %p status %d", chan, conn, status);

	/* TODO: if failed status restore previous fs */
	return 0;
}
5422
5423int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
5424{
5425 struct l2cap_chan_list *l;
5426 struct l2cap_conn *conn = chan->conn->l2cap_data;
5427 struct sock *sk;
5428
5429 BT_DBG("chan %p conn %p", chan, conn);
5430
5431 if (!conn)
5432 return 0;
5433
5434 l = &conn->chan_list;
5435
5436 read_lock(&l->lock);
5437
5438 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
5439 bh_lock_sock(sk);
5440 /* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
5441 if (l2cap_pi(sk)->ampchan == chan) {
5442 l2cap_pi(sk)->ampchan = NULL;
5443 l2cap_amp_move_init(sk);
5444 }
5445 bh_unlock_sock(sk);
5446 }
5447
5448 read_unlock(&l->lock);
5449
5450 return 0;
5451
5452
5453}
5454
5455static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
5456 u8 *data, struct sk_buff *skb)
5457{
5458 struct l2cap_amp_signal_work *amp_work;
5459
5460 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5461 if (!amp_work)
5462 return -ENOMEM;
5463
5464 INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
5465 amp_work->conn = conn;
5466 amp_work->cmd = *cmd;
5467 amp_work->data = data;
5468 amp_work->skb = skb_clone(skb, GFP_ATOMIC);
5469 if (!amp_work->skb) {
5470 kfree(amp_work);
5471 return -ENOMEM;
5472 }
5473
5474 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5475 kfree_skb(amp_work->skb);
5476 kfree(amp_work);
5477 return -ENOMEM;
5478 }
5479
5480 return 0;
5481}
5482
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005483static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
Claudio Takahaside731152011-02-11 19:28:55 -02005484 u16 to_multiplier)
5485{
5486 u16 max_latency;
5487
5488 if (min > max || min < 6 || max > 3200)
5489 return -EINVAL;
5490
5491 if (to_multiplier < 10 || to_multiplier > 3200)
5492 return -EINVAL;
5493
5494 if (max >= to_multiplier * 8)
5495 return -EINVAL;
5496
5497 max_latency = (to_multiplier * 8 / max) - 1;
5498 if (latency > 499 || latency > max_latency)
5499 return -EINVAL;
5500
5501 return 0;
5502}
5503
5504static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5505 struct l2cap_cmd_hdr *cmd, u8 *data)
5506{
5507 struct hci_conn *hcon = conn->hcon;
5508 struct l2cap_conn_param_update_req *req;
5509 struct l2cap_conn_param_update_rsp rsp;
5510 u16 min, max, latency, to_multiplier, cmd_len;
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005511 int err;
Claudio Takahaside731152011-02-11 19:28:55 -02005512
5513 if (!(hcon->link_mode & HCI_LM_MASTER))
5514 return -EINVAL;
5515
5516 cmd_len = __le16_to_cpu(cmd->len);
5517 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5518 return -EPROTO;
5519
5520 req = (struct l2cap_conn_param_update_req *) data;
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005521 min = __le16_to_cpu(req->min);
5522 max = __le16_to_cpu(req->max);
Claudio Takahaside731152011-02-11 19:28:55 -02005523 latency = __le16_to_cpu(req->latency);
5524 to_multiplier = __le16_to_cpu(req->to_multiplier);
5525
5526 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5527 min, max, latency, to_multiplier);
5528
5529 memset(&rsp, 0, sizeof(rsp));
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005530
5531 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5532 if (err)
Claudio Takahaside731152011-02-11 19:28:55 -02005533 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5534 else
5535 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5536
5537 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5538 sizeof(rsp), &rsp);
5539
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005540 if (!err)
5541 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5542
Claudio Takahaside731152011-02-11 19:28:55 -02005543 return 0;
5544}
5545
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005546static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005547 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
5548 struct sk_buff *skb)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005549{
5550 int err = 0;
5551
5552 switch (cmd->code) {
5553 case L2CAP_COMMAND_REJ:
5554 l2cap_command_rej(conn, cmd, data);
5555 break;
5556
5557 case L2CAP_CONN_REQ:
5558 err = l2cap_connect_req(conn, cmd, data);
5559 break;
5560
5561 case L2CAP_CONN_RSP:
5562 err = l2cap_connect_rsp(conn, cmd, data);
5563 break;
5564
5565 case L2CAP_CONF_REQ:
5566 err = l2cap_config_req(conn, cmd, cmd_len, data);
5567 break;
5568
5569 case L2CAP_CONF_RSP:
5570 err = l2cap_config_rsp(conn, cmd, data);
5571 break;
5572
5573 case L2CAP_DISCONN_REQ:
5574 err = l2cap_disconnect_req(conn, cmd, data);
5575 break;
5576
5577 case L2CAP_DISCONN_RSP:
5578 err = l2cap_disconnect_rsp(conn, cmd, data);
5579 break;
5580
5581 case L2CAP_ECHO_REQ:
5582 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5583 break;
5584
5585 case L2CAP_ECHO_RSP:
5586 break;
5587
5588 case L2CAP_INFO_REQ:
5589 err = l2cap_information_req(conn, cmd, data);
5590 break;
5591
5592 case L2CAP_INFO_RSP:
5593 err = l2cap_information_rsp(conn, cmd, data);
5594 break;
5595
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005596 case L2CAP_CREATE_CHAN_REQ:
5597 err = l2cap_create_channel_req(conn, cmd, data);
5598 break;
5599
5600 case L2CAP_CREATE_CHAN_RSP:
5601 err = l2cap_create_channel_rsp(conn, cmd, data);
5602 break;
5603
5604 case L2CAP_MOVE_CHAN_REQ:
5605 case L2CAP_MOVE_CHAN_RSP:
5606 case L2CAP_MOVE_CHAN_CFM:
5607 case L2CAP_MOVE_CHAN_CFM_RSP:
5608 err = l2cap_sig_amp(conn, cmd, data, skb);
5609 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005610 default:
5611 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5612 err = -EINVAL;
5613 break;
5614 }
5615
5616 return err;
5617}
5618
5619static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5620 struct l2cap_cmd_hdr *cmd, u8 *data)
5621{
5622 switch (cmd->code) {
5623 case L2CAP_COMMAND_REJ:
5624 return 0;
5625
5626 case L2CAP_CONN_PARAM_UPDATE_REQ:
Claudio Takahaside731152011-02-11 19:28:55 -02005627 return l2cap_conn_param_update_req(conn, cmd, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005628
5629 case L2CAP_CONN_PARAM_UPDATE_RSP:
5630 return 0;
5631
5632 default:
5633 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5634 return -EINVAL;
5635 }
5636}
5637
/* Parse and dispatch every signaling command packed into one signaling
 * channel PDU.  Commands are dispatched to the LE or BR/EDR handler
 * based on the link type; a handler failure is answered with a command
 * reject.  Consumes @skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	/* Give raw-socket listeners a look at the signaling traffic */
	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* A command that overruns the PDU, or has a zero ident,
		 * is malformed; stop parsing the rest of this PDU.
		 */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
							data, skb);

		if (err) {
			struct l2cap_cmd_rej rej;

			/* NOTE(review): this message is emitted for any
			 * handler error, not just link-type mismatches.
			 */
			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance to the next packed command */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5685
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005686static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005687{
5688 u16 our_fcs, rcv_fcs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005689 int hdr_size;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005690
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005691 if (pi->extended_control)
5692 hdr_size = L2CAP_EXTENDED_HDR_SIZE;
5693 else
5694 hdr_size = L2CAP_ENHANCED_HDR_SIZE;
5695
5696 if (pi->fcs == L2CAP_FCS_CRC16) {
5697 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005698 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5699 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5700
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005701 if (our_fcs != rcv_fcs) {
5702 BT_DBG("Bad FCS");
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005703 return -EBADMSG;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005704 }
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005705 }
5706 return 0;
5707}
5708
/* Feed the received reqseq and F-bit carried in @control into the
 * transmit-side ERTM state machine.
 */
static void l2cap_ertm_pass_to_tx(struct sock *sk,
				struct bt_l2cap_control *control)
{
	BT_DBG("sk %p, control %p", sk, control);
	l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
}
5715
/* Feed only the F-bit carried in @control into the transmit-side ERTM
 * state machine (reqseq processing is skipped).
 */
static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
				struct bt_l2cap_control *control)
{
	BT_DBG("sk %p, control %p", sk, control);
	l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
}
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005722
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005723static void l2cap_ertm_resend(struct sock *sk)
5724{
5725 struct bt_l2cap_control control;
5726 struct l2cap_pinfo *pi;
5727 struct sk_buff *skb;
5728 struct sk_buff *tx_skb;
5729 u16 seq;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005730
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005731 BT_DBG("sk %p", sk);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005732
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005733 pi = l2cap_pi(sk);
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005734
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005735 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
5736 return;
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03005737
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005738 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
5739 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
5740 return;
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005741
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005742 while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
5743 seq = l2cap_seq_list_pop(&pi->retrans_list);
5744
5745 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
5746 if (!skb) {
5747 BT_DBG("Error: Can't retransmit seq %d, frame missing",
5748 (int) seq);
5749 continue;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005750 }
5751
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005752 bt_cb(skb)->retries += 1;
5753 control = bt_cb(skb)->control;
5754
5755 if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
5756 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
5757 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
5758 l2cap_seq_list_clear(&pi->retrans_list);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005759 break;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005760 }
5761
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005762 control.reqseq = pi->buffer_seq;
5763 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
5764 control.final = 1;
5765 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
5766 } else {
5767 control.final = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005768 }
5769
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005770 if (skb_cloned(skb)) {
5771 /* Cloned sk_buffs are read-only, so we need a
5772 * writeable copy
5773 */
5774 tx_skb = skb_copy(skb, GFP_ATOMIC);
5775 } else {
5776 tx_skb = skb_clone(skb, GFP_ATOMIC);
5777 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005778
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005779 /* Update skb contents */
5780 if (pi->extended_control) {
5781 put_unaligned_le32(__pack_extended_control(&control),
5782 tx_skb->data + L2CAP_HDR_SIZE);
5783 } else {
5784 put_unaligned_le16(__pack_enhanced_control(&control),
5785 tx_skb->data + L2CAP_HDR_SIZE);
5786 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005787
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005788 if (pi->fcs == L2CAP_FCS_CRC16)
5789 apply_fcs(tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005790
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005791 tx_skb->sk = sk;
5792 tx_skb->destructor = l2cap_skb_destructor;
5793 atomic_inc(&pi->ertm_queued);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005794
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005795 l2cap_do_send(sk, tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005796
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005797 BT_DBG("Resent txseq %d", (int)control.txseq);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005798
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005799 pi->last_acked_seq = pi->buffer_seq;
Mat Martineauc0675b82011-07-07 09:39:02 -07005800 }
5801}
5802
/* Queue a single frame (identified by control->reqseq) for
 * retransmission and run the resend machinery.
 */
static inline void l2cap_ertm_retransmit(struct sock *sk,
					struct bt_l2cap_control *control)
{
	BT_DBG("sk %p, control %p", sk, control);

	l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
	l2cap_ertm_resend(sk);
}
5811
/* Queue all sent-but-unacked frames starting at control->reqseq for
 * retransmission (REJ recovery).  If the peer polled, the F-bit will
 * be set on the first retransmitted frame.  No-op while the remote
 * side is busy.
 */
static void l2cap_ertm_retransmit_all(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	if (control->poll)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	/* Rebuild the retransmission list from scratch */
	l2cap_seq_list_clear(&pi->retrans_list);

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return;

	if (pi->unacked_frames) {
		/* Skip past frames the peer has already acknowledged */
		skb_queue_walk(TX_QUEUE(sk), skb) {
			if ((bt_cb(skb)->control.txseq == control->reqseq) ||
				skb == sk->sk_send_head)
				break;
		}

		/* Queue everything from reqseq up to the send head */
		skb_queue_walk_from(TX_QUEUE(sk), skb) {
			if (skb == sk->sk_send_head)
				break;

			l2cap_seq_list_append(&pi->retrans_list,
						bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(sk);
	}
}
5848
5849static inline void append_skb_frag(struct sk_buff *skb,
5850 struct sk_buff *new_frag, struct sk_buff **last_frag)
5851{
5852 /* skb->len reflects data in skb as well as all fragments
5853 skb->data_len reflects only data in fragments
5854 */
5855 BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);
5856
5857 if (!skb_has_frag_list(skb))
5858 skb_shinfo(skb)->frag_list = new_frag;
5859
5860 new_frag->next = NULL;
5861
5862 (*last_frag)->next = new_frag;
5863 *last_frag = new_frag;
5864
5865 skb->len += new_frag->len;
5866 skb->data_len += new_frag->len;
5867 skb->truesize += new_frag->truesize;
5868}
5869
/* Deliver an in-sequence I-frame to the socket, reassembling segmented
 * SDUs according to the frame's SAR bits.
 *
 * Ownership: @skb is consumed on success (queued to the socket, or
 * linked into the partial SDU and set to NULL below).  On error the
 * partial SDU and any unconsumed skb are freed here, and local-busy
 * may be asserted if the receive buffer is full.
 *
 * Returns 0 on success or a negative errno on reassembly/queue failure.
 */
static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
		struct bt_l2cap_control *control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = -EINVAL;

	BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
		skb, skb->len, skb->truesize);

	if (!control)
		return err;

	pi = l2cap_pi(sk);

	BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
		control->frame_type, control->sar, control->txseq,
		control->reqseq, control->final);

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A stray partial SDU is discarded first */
		if (pi->sdu) {
			BT_DBG("Unexpected unsegmented PDU during reassembly");
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}

		BT_DBG("Unsegmented");
		err = sock_queue_rcv_skb(sk, skb);
		break;

	case L2CAP_SAR_START:
		if (pi->sdu) {
			BT_DBG("Unexpected start PDU during reassembly");
			kfree_skb(pi->sdu);
		}

		/* First two bytes of a START frame carry the SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* err stays -EINVAL: a START frame must not already hold
		 * the whole SDU
		 */
		if (skb->len >= pi->sdu_len)
			break;

		pi->sdu = skb;
		pi->sdu_last_frag = skb;

		BT_DBG("Start");

		/* skb now owned by the partial SDU */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* err stays -EINVAL when no reassembly is in progress */
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* err stays -EINVAL if the SDU overflows its stated length */
		if (pi->sdu->len >= pi->sdu_len)
			break;

		BT_DBG("Continue, reassembled %d", pi->sdu->len);

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* Total length must match exactly what START announced */
		if (pi->sdu->len != pi->sdu_len)
			break;

		BT_DBG("End, reassembled %d", pi->sdu->len);
		/* If the sender used tiny PDUs, the rcv queuing could fail.
		 * Applications that have issues here should use a larger
		 * sk_rcvbuf.
		 */
		err = sock_queue_rcv_skb(sk, pi->sdu);

		if (!err) {
			/* Reassembly complete */
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}
		break;

	default:
		BT_DBG("Bad SAR value");
		break;
	}

	if (err) {
		BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
			err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;
		if (skb)
			kfree_skb(skb);
	}

	/* Update local busy state */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);

	return err;
}
5994
/* Deliver frames buffered on the SREJ queue once the missing frames
 * have arrived.
 *
 * Returns 0, or the first delivery error from
 * l2cap_ertm_rx_expected_iframe().
 */
static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
{
	int err = 0;
	/* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
	 * until a gap is encountered.
	 */

	struct l2cap_pinfo *pi;

	BT_DBG("sk %p", sk);
	pi = l2cap_pi(sk);

	while (l2cap_rmem_available(sk)) {
		struct sk_buff *skb;
		BT_DBG("Searching for skb with txseq %d (queue len %d)",
			(int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));

		skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);

		/* Gap found - wait for more retransmissions */
		if (!skb)
			break;

		skb_unlink(skb, SREJ_QUEUE(sk));
		pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
		err = l2cap_ertm_rx_expected_iframe(sk,
						&bt_cb(skb)->control, skb);
		if (err)
			break;
	}

	/* All buffered frames delivered: leave SREJ recovery */
	if (skb_queue_empty(SREJ_QUEUE(sk))) {
		pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
		l2cap_ertm_send_ack(sk);
	}

	return err;
}
6032
/* Handle a received SREJ (Selective Reject) S-frame: the peer is asking
 * for retransmission of the single I-frame with sequence number
 * control->reqseq.  Disconnects on protocol violations (reqseq equal to
 * next_tx_seq, or the frame's retry budget exhausted).  Silently returns
 * if the requested frame is no longer in the TX queue.
 */
static void l2cap_ertm_handle_srej(struct sock *sk,
			struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	/* reqseq == next_tx_seq would acknowledge a frame never sent */
	if (control->reqseq == pi->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting",
			(int) control->reqseq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
			(int) control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
		BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (control->poll) {
		/* SREJ with P=1: answer with F=1 and retransmit the frame */
		l2cap_ertm_pass_to_tx(sk, control);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_ertm_retransmit(sk, control);
		l2cap_ertm_send(sk);

		/* Remember the reqseq so a later SREJ+F can be matched */
		if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			pi->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_ertm_pass_to_tx_fbit(sk, control);

		if (control->final) {
			/* Skip the retransmit if this final SREJ matches the
			 * one already acted upon (SREJ_ACT bookkeeping).
			 */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				(pi->srej_save_reqseq == control->reqseq)) {
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			} else {
				l2cap_ertm_retransmit(sk, control);
			}
		} else {
			l2cap_ertm_retransmit(sk, control);
			if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
				pi->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6096
/* Handle a received REJ (Reject) S-frame: the peer requests
 * retransmission of all unacked I-frames starting at control->reqseq.
 * Disconnects if reqseq is invalid or the first rejected frame has
 * exhausted its retry budget.
 */
static void l2cap_ertm_handle_rej(struct sock *sk,
			struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	/* reqseq == next_tx_seq would reject a frame never sent */
	if (control->reqseq == pi->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting",
			(int) control->reqseq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);

	/* Unlike SREJ handling, a missing skb here is tolerated; only an
	 * exhausted retry count forces a disconnect.
	 */
	if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	l2cap_ertm_pass_to_tx(sk, control);

	if (control->final) {
		/* REJ with F=1: if we already acted on a REJ, this final
		 * just clears the flag; otherwise retransmit everything.
		 */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_ertm_retransmit_all(sk, control);
	} else {
		l2cap_ertm_retransmit_all(sk, control);
		l2cap_ertm_send(sk);
		if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
6138
/* Classify an incoming I-frame's txseq relative to the receive window.
 * Returns one of the L2CAP_ERTM_TXSEQ_* codes that drive the RX state
 * machines: EXPECTED, UNEXPECTED (gap → SREJ), DUPLICATE, the *_SREJ
 * variants (only meaningful while in SREJ_SENT state), INVALID, or
 * INVALID_IGNORE.  All window math is modular via __delta_seq().
 */
static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);
	pi = l2cap_pi(sk);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
		(int)pi->expected_tx_seq);

	if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - txseq outside "
					"tx window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - bad txseq within tx "
					"window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the retransmission we want next */
		if (pi->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
		}

		/* Requested via SREJ but arrived out of the expected order */
		if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - txseq not requested "
				"with SREJ");
			return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (pi->expected_tx_seq == txseq) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_ERTM_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (modulo) means already received */
	if (__delta_seq(txseq, pi->last_acked_seq, pi) <
		__delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_ERTM_TXSEQ_DUPLICATE;
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_ERTM_TXSEQ_UNEXPECTED;
	}
}
6228
/* ERTM receiver state machine, RECV (normal operation) state.
 * Dispatches one RX event (I-frame or RR/REJ/RNR/SREJ S-frame).
 * Ownership rule: if the incoming skb is consumed (queued or passed to
 * reassembly) skb_in_use is set; otherwise the skb is freed at the
 * bottom.  Returns 0 or a negative error from frame reassembly.
 */
static int l2cap_ertm_rx_state_recv(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Local busy: drop; peer will retransmit later */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else {
					/* Clear F so retransmits don't echo it */
					control->final = 0;
					l2cap_ertm_retransmit_all(sk, control);
					l2cap_ertm_send(sk);
				}
			}

			if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
				l2cap_ertm_send_ack(sk);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding unexpected seq %d",
					control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			l2cap_seq_list_clear(&pi->srej_list);
			l2cap_ertm_send_srej(sk, control->txseq);

			pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Ack info is still valid even on a duplicate frame */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
				pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_PREPARE) {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			l2cap_ertm_send_i_or_rr_or_rnr(sk);
		} else {
			/* Remote busy cleared with frames outstanding:
			 * restart the retransmission timer.
			 */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames)
				l2cap_ertm_start_retrans_timer(pi);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control && control->poll) {
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_rr_or_rnr(sk, 0);
		}
		l2cap_ertm_stop_retrans_timer(pi);
		l2cap_seq_list_clear(&pi->retrans_list);
		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	default:
		break;
	}

	/* Frame not queued or consumed above: free it here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6366
/* ERTM receiver state machine, SREJ_SENT state: SREJs are outstanding
 * and out-of-order frames are buffered on SREJ_QUEUE until the gaps are
 * filled.  Same skb ownership rule as l2cap_ertm_rx_state_recv():
 * skb_in_use marks consumption, otherwise the skb is freed at the end.
 */
static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->expected_tx_seq = __next_seq(txseq, pi);
			break;
		case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
			/* The retransmission we asked for first has arrived */
			l2cap_seq_list_pop(&pi->srej_list);

			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			/* Drain any now-contiguous frames to reassembly */
			err = l2cap_ertm_rx_queued_iframes(sk);
			if (err)
				break;

			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej_list(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				/* Clear F so retransmits don't echo it */
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			/* Re-request the most recent outstanding SREJ */
			l2cap_ertm_send_srej_tail(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send_ack(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->poll)
			l2cap_ertm_send_srej_tail(sk);
		else {
			/* Plain RNR: respond with an RR acking buffer_seq */
			struct bt_l2cap_control rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.frame_type = 's';
			rr_control.super = L2CAP_SFRAME_RR;
			rr_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &rr_control);
		}

		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	}

	/* Frame not queued above: free it here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6512
/* ERTM receiver state machine, AMP_MOVE state: a channel move between
 * controllers is in progress, so only in-sequence I-frames and the
 * reqseq of RR/RNR/REJ frames are processed; everything that would
 * change receiver state (SREJs, gap handling) is ignored.
 */
static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
					struct bt_l2cap_control *control,
					struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	/* Only handle expected frames, to avoid state changes. */

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
				L2CAP_ERTM_TXSEQ_EXPECTED) {
			l2cap_ertm_pass_to_tx(sk, control);

			/* Local busy: drop; peer will retransmit later */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else
					control->final = 0;
			}
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
	case L2CAP_ERTM_EVENT_RECV_RNR:
	case L2CAP_ERTM_EVENT_RECV_REJ:
		/* Only consume the acknowledgement; no retransmissions */
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		/* Ignore */
		break;
	default:
		break;
	}

	/* Frame not consumed above: free it here */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6574
/* Respond to the peer's poll after an AMP channel move: process the
 * saved amp_move_reqseq, rewind the transmit side to the peer's
 * expectation, finish the move, answer with F=1, and replay the saved
 * RX event through the RECV state handler.  Returns 0 on success, a
 * negative error from l2cap_finish_amp_move(), or -EPROTO if the saved
 * event was an I-frame (which cannot be replayed without its skb).
 */
static int l2cap_answer_move_poll(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;
	int err = 0;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));
	else
		sk->sk_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	pi->next_tx_seq = pi->amp_move_reqseq;
	pi->unacked_frames = 0;

	err = l2cap_finish_amp_move(sk);

	if (err)
		return err;

	pi->conn_state |= L2CAP_CONN_SEND_FBIT;
	l2cap_ertm_send_i_or_rr_or_rnr(sk);

	/* Replay the event that triggered the poll answer, using a
	 * synthetic control with only reqseq populated.
	 */
	memset(&control, 0, sizeof(control));
	control.reqseq = pi->amp_move_reqseq;

	if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
		err = -EPROTO;
	else
		err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
					pi->amp_move_event);

	return err;
}
6617
6618static void l2cap_amp_move_setup(struct sock *sk)
6619{
6620 struct l2cap_pinfo *pi;
6621 struct sk_buff *skb;
6622
6623 BT_DBG("sk %p", sk);
6624
6625 pi = l2cap_pi(sk);
6626
6627 l2cap_ertm_stop_ack_timer(pi);
6628 l2cap_ertm_stop_retrans_timer(pi);
6629 l2cap_ertm_stop_monitor_timer(pi);
6630
6631 pi->retry_count = 0;
6632 skb_queue_walk(TX_QUEUE(sk), skb) {
6633 if (bt_cb(skb)->retries)
6634 bt_cb(skb)->retries = 1;
6635 else
6636 break;
6637 }
6638
6639 pi->expected_tx_seq = pi->buffer_seq;
6640
6641 pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
6642 l2cap_seq_list_clear(&pi->retrans_list);
6643 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
6644 skb_queue_purge(SREJ_QUEUE(sk));
6645
6646 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
6647 pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
6648
6649 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
6650 pi->rx_state);
6651
6652 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
6653}
6654
6655static void l2cap_amp_move_revert(struct sock *sk)
6656{
6657 struct l2cap_pinfo *pi;
6658
6659 BT_DBG("sk %p", sk);
6660
6661 pi = l2cap_pi(sk);
6662
6663 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6664 l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6665 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6666 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
6667 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
6668}
6669
6670static int l2cap_amp_move_reconf(struct sock *sk)
6671{
6672 struct l2cap_pinfo *pi;
6673 u8 buf[64];
6674 int err = 0;
6675
6676 BT_DBG("sk %p", sk);
6677
6678 pi = l2cap_pi(sk);
6679
6680 l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
6681 l2cap_build_amp_reconf_req(sk, buf), buf);
6682 return err;
6683}
6684
/* Complete a successful AMP move.  The initiator of an ERTM channel
 * tries to send a reconfigure request (if enable_reconfig); on failure
 * or when reconfiguration is disabled it falls back to an explicit poll
 * and waits for the F-bit.  The responder of an ERTM channel waits for
 * the peer's P-bit (with reconfigure pending); non-ERTM channels return
 * straight to normal reception.
 */
static void l2cap_amp_move_success(struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		int err = 0;
		/* Send reconfigure request */
		if (pi->mode == L2CAP_MODE_ERTM) {
			pi->reconf_state = L2CAP_RECONF_INT;
			if (enable_reconfig)
				err = l2cap_amp_move_reconf(sk);

			/* Reconfig unavailable or failed: poll instead */
			if (err || !enable_reconfig) {
				pi->reconf_state = L2CAP_RECONF_NONE;
				l2cap_ertm_tx(sk, NULL, NULL,
					L2CAP_ERTM_EVENT_EXPLICIT_POLL);
				pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
			}
		} else
			pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	} else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		if (pi->mode == L2CAP_MODE_ERTM)
			pi->rx_state =
				L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
		else
			pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	}
}
6717
6718static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
6719{
6720 /* Make sure reqseq is for a packet that has been sent but not acked */
6721 u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
6722 return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
6723}
6724
/* Streaming-mode receive path.  Streaming mode has no retransmission:
 * an in-sequence frame is passed to reassembly; any other frame causes
 * the partial SDU and the frame itself to be discarded.  The receiver
 * then unconditionally resynchronizes its sequence tracking to this
 * frame's txseq.  Always returns 0.
 */
static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, state %d",
		sk, control, skb, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
		L2CAP_ERTM_TXSEQ_EXPECTED) {
		l2cap_ertm_pass_to_tx(sk, control);

		BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
			__next_seq(pi->buffer_seq, pi));

		pi->buffer_seq = __next_seq(pi->buffer_seq, pi);

		/* Consumes skb; error intentionally ignored in streaming */
		l2cap_ertm_rx_expected_iframe(sk, control, skb);
	} else {
		/* Out-of-sequence: drop any partially reassembled SDU */
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resync to the received txseq regardless of classification */
	pi->last_acked_seq = control->txseq;
	pi->expected_tx_seq = __next_seq(control->txseq, pi);

	return err;
}
6765
/* Top-level ERTM receive dispatcher: validates the frame's reqseq and
 * routes the event to the handler for the current rx_state.  An invalid
 * reqseq (acknowledging a frame never sent) tears down the connection.
 * Returns 0 or a negative error from the state handler.
 */
static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
		sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (__valid_reqseq(pi, control->reqseq)) {
		switch (pi->rx_state) {
		case L2CAP_ERTM_RX_STATE_RECV:
			err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
			break;
		case L2CAP_ERTM_RX_STATE_SREJ_SENT:
			err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_AMP_MOVE:
			err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
			/* AMP move fallback: resume only when the peer's
			 * F-bit arrives, then rewind TX state and resegment
			 * for the (possibly different) controller MTU.
			 * NOTE(review): if control->final is 0 a non-NULL
			 * skb appears to be neither consumed nor freed on
			 * this path — possible leak, verify against callers.
			 */
			if (control->final) {
				pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;

				pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
				l2cap_ertm_process_reqseq(sk, control->reqseq);

				if (!skb_queue_empty(TX_QUEUE(sk)))
					sk->sk_send_head =
						skb_peek(TX_QUEUE(sk));
				else
					sk->sk_send_head = NULL;

				/* Rewind next_tx_seq to the point expected
				 * by the receiver.
				 */
				pi->next_tx_seq = control->reqseq;
				pi->unacked_frames = 0;

				/* Pick up the MTU of whichever controller
				 * now carries the channel.
				 */
				if (pi->ampcon)
					pi->conn->mtu =
						pi->ampcon->hdev->acl_mtu;
				else
					pi->conn->mtu =
						pi->conn->hcon->hdev->acl_mtu;

				err = l2cap_setup_resegment(sk);

				if (err)
					break;

				err = l2cap_ertm_rx_state_recv(sk, control, skb,
							event);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;
				err = l2cap_answer_move_poll(sk);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;

				BT_DBG("amp_move_role 0x%2.2x, "
					"reconf_state 0x%2.2x",
					pi->amp_move_role, pi->reconf_state);

				if (pi->reconf_state == L2CAP_RECONF_ACC)
					err = l2cap_amp_move_reconf(sk);
				else
					err = l2cap_answer_move_poll(sk);
			}
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
			control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	}

	return err;
}
6860
6861void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
6862{
6863 lock_sock(sk);
6864
6865 l2cap_pi(sk)->fixed_channel = 1;
6866
6867 l2cap_pi(sk)->imtu = opt->imtu;
6868 l2cap_pi(sk)->omtu = opt->omtu;
6869 l2cap_pi(sk)->remote_mps = opt->omtu;
6870 l2cap_pi(sk)->mps = opt->omtu;
6871 l2cap_pi(sk)->flush_to = opt->flush_to;
6872 l2cap_pi(sk)->mode = opt->mode;
6873 l2cap_pi(sk)->fcs = opt->fcs;
6874 l2cap_pi(sk)->max_tx = opt->max_tx;
6875 l2cap_pi(sk)->remote_max_tx = opt->max_tx;
6876 l2cap_pi(sk)->tx_win = opt->txwin_size;
6877 l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
6878 l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
6879 l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
6880
6881 if (opt->mode == L2CAP_MODE_ERTM ||
6882 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
6883 l2cap_ertm_init(sk);
6884
6885 release_sock(sk);
6886
6887 return;
6888}
6889
/* Map an S-frame's 2-bit supervisory function field (control->super,
 * used as the index in l2cap_data_channel()) to the corresponding ERTM
 * receive event: RR, REJ, RNR, SREJ.
 */
static const u8 l2cap_ertm_rx_func_to_event[4] = {
	L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
	L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
};
6894
/* Receive entry point for a connection-oriented data channel.  In basic
 * mode the skb is queued straight to the socket (dropped on MTU or
 * rcvbuf overflow).  In ERTM/streaming mode the control field is parsed
 * (4-byte extended or 2-byte enhanced format), the FCS and payload
 * length are validated, and the frame is dispatched to the ERTM or
 * streaming receive machinery.  Always returns 0; the skb is consumed
 * on every path (queued, handed to the RX state machines, or freed at
 * "drop").
 */
int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control *control;
	u16 len;
	u8 event;
	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		control = &bt_cb(skb)->control;
		/* NOTE(review): skb->len is not checked against the 4- or
		 * 2-byte control field before the read/pull below —
		 * presumably the caller guarantees a minimum length;
		 * verify against the HCI receive path.
		 */
		if (pi->extended_control) {
			__get_extended_control(get_unaligned_le32(skb->data),
					control);
			skb_pull(skb, 4);
		} else {
			__get_enhanced_control(get_unaligned_le16(skb->data),
					control);
			skb_pull(skb, 2);
		}

		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* A start-of-SDU I-frame carries a 2-byte SDU length */
		if ((control->frame_type == 'i') &&
			(control->sar == L2CAP_SAR_START))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask for retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (control->frame_type == 'i') {

			int err;

			BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
				control->sar, control->reqseq, control->final,
				control->txseq);

			/* Validate F-bit - F=0 always valid, F=1 only
			 * valid in TX WAIT_F
			 */
			if (control->final && (pi->tx_state !=
					L2CAP_ERTM_TX_STATE_WAIT_F))
				goto drop;

			if (pi->mode != L2CAP_MODE_STREAMING) {
				event = L2CAP_ERTM_EVENT_RECV_IFRAME;
				err = l2cap_ertm_rx(sk, control, skb, event);
			} else
				err = l2cap_strm_rx(sk, control, skb);
			if (err)
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		} else {
			/* Only I-frames are expected in streaming mode */
			if (pi->mode == L2CAP_MODE_STREAMING)
				goto drop;

			BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
				control->reqseq, control->final, control->poll,
				control->super);

			/* S-frames carry no payload */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
				goto drop;
			}

			/* Validate F and P bits */
			if (control->final &&
				((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
					|| control->poll))
				goto drop;

			event = l2cap_ertm_rx_func_to_event[control->super];
			if (l2cap_ertm_rx(sk, control, skb, event))
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		}

		/* skb ownership was transferred to the RX machinery */
		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	return 0;
}
7020
/* Process a frame whose handling was deferred: take the socket lock and
 * run it through the normal data channel receive path.
 */
void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	l2cap_data_channel(sk, skb);
	release_sock(sk);
}
7027
/* Deliver a connectionless (PSM-addressed) frame to the matching bound
 * or connected socket.  The skb is dropped if no socket matches, the
 * socket is in the wrong state, or the frame exceeds the socket's
 * incoming MTU.  The "if (sk)" guard at "done" covers the no-socket
 * drop path, where bh_lock_sock() was never taken.  Always returns 0;
 * the skb is consumed on every path.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	/* Zero return means the socket took ownership of the skb */
	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
7057
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007058static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
7059{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007060 struct sock *sk;
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007061
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007062 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
7063 if (!sk)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007064 goto drop;
7065
7066 bh_lock_sock(sk);
7067
7068 BT_DBG("sk %p, len %d", sk, skb->len);
7069
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007070 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007071 goto drop;
7072
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007073 if (l2cap_pi(sk)->imtu < skb->len)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007074 goto drop;
7075
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007076 if (!sock_queue_rcv_skb(sk, skb))
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007077 goto done;
7078
7079drop:
7080 kfree_skb(skb);
7081
7082done:
7083 if (sk)
7084 bh_unlock_sock(sk);
7085 return 0;
7086}
7087
/* Dispatch one complete L2CAP frame to the handler for its channel ID.
 *
 * The skb begins with the Basic L2CAP header; it is stripped here and the
 * advertised length is validated against the actual payload before
 * dispatch.  Ownership of @skb transfers to the chosen handler (or it is
 * freed here on error/unknown CID).
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct sock *sk;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match exactly what reassembly produced */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless data carries a 2-byte PSM before payload */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* Security Manager failure tears down the whole connection */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		/* NOTE(review): l2cap_get_chan_by_scid appears to return
		 * with the socket bh-locked (the unconditional
		 * bh_unlock_sock below pairs with it) — confirm in the
		 * helper's definition.
		 */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
		if (sk) {
			if (sock_owned_by_user(sk)) {
				/* Owner is in process context; defer via
				 * backlog, freeing on backlog overflow. */
				BT_DBG("backlog sk %p", sk);
				if (sk_add_backlog(sk, skb))
					kfree_skb(skb);
			} else
				l2cap_data_channel(sk, skb);

			bh_unlock_sock(sk);
		} else if (cid == L2CAP_CID_A2MP) {
			BT_DBG("A2MP");
			amp_conn_ind(conn, skb);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			kfree_skb(skb);
		}

		break;
	}
}
7149
7150/* ---- L2CAP interface with lower layer (HCI) ---- */
7151
7152static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
7153{
7154 int exact = 0, lm1 = 0, lm2 = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007155 register struct sock *sk;
7156 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007157
7158 if (type != ACL_LINK)
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007159 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007160
7161 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
7162
7163 /* Find listening sockets and check their link_mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007164 read_lock(&l2cap_sk_list.lock);
7165 sk_for_each(sk, node, &l2cap_sk_list.head) {
7166 if (sk->sk_state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007167 continue;
7168
7169 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007170 lm1 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007171 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007172 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007173 exact++;
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007174 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
7175 lm2 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007176 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007177 lm2 |= HCI_LM_MASTER;
7178 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007179 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007180 read_unlock(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007181
7182 return exact ? lm1 : lm2;
7183}
7184
7185static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7186{
Marcel Holtmann01394182006-07-03 10:02:46 +02007187 struct l2cap_conn *conn;
7188
Linus Torvalds1da177e2005-04-16 15:20:36 -07007189 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
7190
Ville Tervoacd7d372011-02-10 22:38:49 -03007191 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007192 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007193
7194 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007195 conn = l2cap_conn_add(hcon, status);
7196 if (conn)
7197 l2cap_conn_ready(conn);
Marcel Holtmann01394182006-07-03 10:02:46 +02007198 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007199 l2cap_conn_del(hcon, bt_err(status));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007200
7201 return 0;
7202}
7203
Marcel Holtmann2950f212009-02-12 14:02:50 +01007204static int l2cap_disconn_ind(struct hci_conn *hcon)
7205{
7206 struct l2cap_conn *conn = hcon->l2cap_data;
7207
7208 BT_DBG("hcon %p", hcon);
7209
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007210 if (hcon->type != ACL_LINK || !conn)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007211 return 0x13;
7212
7213 return conn->disc_reason;
7214}
7215
7216static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007217{
7218 BT_DBG("hcon %p reason %d", hcon, reason);
7219
Ville Tervoacd7d372011-02-10 22:38:49 -03007220 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007221 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007222
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007223 l2cap_conn_del(hcon, bt_err(reason));
Marcel Holtmann01394182006-07-03 10:02:46 +02007224
Linus Torvalds1da177e2005-04-16 15:20:36 -07007225 return 0;
7226}
7227
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007228static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007229{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007230 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007231 return;
7232
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007233 if (encrypt == 0x00) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007234 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
7235 l2cap_sock_clear_timer(sk);
7236 l2cap_sock_set_timer(sk, HZ * 5);
7237 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
7238 __l2cap_sock_close(sk, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007239 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007240 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
7241 l2cap_sock_clear_timer(sk);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007242 }
7243}
7244
/* Security (authentication/encryption) completion callback from HCI.
 *
 * Walks every channel on the connection under the channel-list read lock
 * and advances each one according to its state:
 *   - LE data channels: propagate the new security level and complete the
 *     SMP encryption handshake;
 *   - channels with a pending connect request: skip;
 *   - established channels: re-check encryption requirements;
 *   - BT_CONNECT: send the deferred connect request (or an AMP physical
 *     link request) on success, arm a short retry timer on failure;
 *   - BT_CONNECT2: answer the peer's pending connect request with
 *     success or a security block.
 *
 * @status: 0 on success, HCI error otherwise.  Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	/* Each socket is bh-locked individually while its state is updated */
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);

		if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
			/* LE link: adopt the link's security level only when
			 * encryption actually came up without error */
			if (!status && encrypt)
				l2cap_pi(sk)->sec_level = hcon->sec_level;

			del_timer(&conn->security_timer);
			l2cap_chan_ready(sk);
			smp_link_encrypt_cmplt(conn, status, encrypt);

			bh_unlock_sock(sk);
			continue;
		}

		/* A connect is already in flight; nothing to do here */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channel: just revalidate encryption policy */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			if (!status) {
				l2cap_pi(sk)->conf_state |=
						L2CAP_CONF_CONNECT_PEND;
				/* Channel may prefer to be moved to an AMP
				 * controller before the L2CAP connect */
				if (l2cap_pi(sk)->amp_pref ==
						BT_AMP_POLICY_PREFER_AMP) {
					amp_create_physical(l2cap_pi(sk)->conn,
								sk);
				} else
					l2cap_send_conn_req(sk);
			} else {
				/* Security failed: rearm a short timer so the
				 * channel is torn down shortly */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				/* Incoming channel destined for an AMP
				 * controller is handed off; response is sent
				 * later by the AMP code */
				if (l2cap_pi(sk)->amp_id) {
					amp_accept_physical(conn,
						l2cap_pi(sk)->amp_id, sk);
					bh_unlock_sock(sk);
					continue;
				}

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
7338
/* Receive one ACL data packet from HCI and reassemble L2CAP frames.
 *
 * ACL_START packets begin a new L2CAP frame: the Basic header is parsed,
 * the advertised length validated, and — when the frame spans multiple
 * ACL packets — a reassembly buffer (conn->rx_skb/rx_len) is allocated.
 * Continuation packets are appended until rx_len reaches zero, at which
 * point the complete frame is handed to l2cap_recv_frame().
 *
 * @skb is always consumed (dispatched or freed).  Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Only BR/EDR devices get an on-demand connection object here */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A start frame while reassembly is in progress means the
		 * previous frame was truncated: discard it */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		/* ACL_START|ACL_CONT on a frame shorter than advertised */
		if (flags & ACL_CONT) {
			BT_ERR("Complete frame is incomplete "
					"(len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* NOTE(review): l2cap_get_chan_by_scid appears to return
		 * with the socket bh-locked — the bh_unlock_sock calls
		 * below pair with it; confirm in the helper. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		/* Reject early if the eventual frame would exceed the
		 * channel's incoming MTU */
		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
				len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns the remaining expected bytes: abort
		 * the whole reassembly */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
7457
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007458static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007459{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007460 struct sock *sk;
7461 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007462
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007463 read_lock_bh(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007464
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007465 sk_for_each(sk, node, &l2cap_sk_list.head) {
7466 struct l2cap_pinfo *pi = l2cap_pi(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007467
Gustavo F. Padovan903d3432011-02-10 14:16:06 -02007468 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007469 batostr(&bt_sk(sk)->src),
7470 batostr(&bt_sk(sk)->dst),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007471 sk->sk_state, __le16_to_cpu(pi->psm),
7472 pi->scid, pi->dcid,
7473 pi->imtu, pi->omtu, pi->sec_level,
7474 pi->mode);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007475 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007476
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007477 read_unlock_bh(&l2cap_sk_list.lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007478
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007479 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007480}
7481
/* debugfs open callback: bind the seq_file single-shot show routine. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7486
/* File operations for the l2cap debugfs entry (standard seq_file glue) */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs file; created in l2cap_init, removed in l2cap_exit */
static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007495
/* HCI protocol descriptor: registers L2CAP's callbacks with the HCI core
 * (connection setup/teardown, security events, ACL data, and the
 * AMP-related create/modify/destroy confirmations).
 */
static struct hci_proto l2cap_hci_proto = {
	.name = "L2CAP",
	.id = HCI_PROTO_L2CAP,
	.connect_ind = l2cap_connect_ind,
	.connect_cfm = l2cap_connect_cfm,
	.disconn_ind = l2cap_disconn_ind,
	.disconn_cfm = l2cap_disconn_cfm,
	.security_cfm = l2cap_security_cfm,
	.recv_acldata = l2cap_recv_acldata,
	.create_cfm = l2cap_create_cfm,
	.modify_cfm = l2cap_modify_cfm,
	.destroy_cfm = l2cap_destroy_cfm,
};
7509
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007510int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007511{
7512 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007513
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007514 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007515 if (err < 0)
7516 return err;
7517
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007518 _l2cap_wq = create_singlethread_workqueue("l2cap");
7519 if (!_l2cap_wq) {
7520 err = -ENOMEM;
7521 goto error;
7522 }
7523
Linus Torvalds1da177e2005-04-16 15:20:36 -07007524 err = hci_register_proto(&l2cap_hci_proto);
7525 if (err < 0) {
7526 BT_ERR("L2CAP protocol registration failed");
7527 bt_sock_unregister(BTPROTO_L2CAP);
7528 goto error;
7529 }
7530
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007531 if (bt_debugfs) {
7532 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
7533 bt_debugfs, NULL, &l2cap_debugfs_fops);
7534 if (!l2cap_debugfs)
7535 BT_ERR("Failed to create L2CAP debug file");
7536 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007537
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007538 if (amp_init() < 0) {
7539 BT_ERR("AMP Manager initialization failed");
7540 goto error;
7541 }
7542
Linus Torvalds1da177e2005-04-16 15:20:36 -07007543 return 0;
7544
7545error:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007546 destroy_workqueue(_l2cap_wq);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007547 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007548 return err;
7549}
7550
/* Module exit: tear down in reverse order of l2cap_init — AMP manager,
 * debugfs entry, work queue (flushed before destruction), HCI protocol
 * registration, and finally the socket layer.
 */
void l2cap_exit(void)
{
	amp_exit();

	debugfs_remove(l2cap_debugfs);

	/* Drain any pending deferred work before destroying the queue */
	flush_workqueue(_l2cap_wq);
	destroy_workqueue(_l2cap_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
7565
/* Runtime-tunable module parameters (writable via sysfs, mode 0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_reconfig, bool, 0644);
MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");