blob: ca18ed55f3c68dbf254733496417c9e97fc91fa5 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Gustavo F. Padovance5706b2010-07-13 11:57:11 -03004 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Gustavo F. Padovan5d8868f2010-07-16 16:18:39 -03005 Copyright (C) 2010 Google Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090017 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090022 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 SOFTWARE IS DISCLAIMED.
25*/
26
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020027/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30
31#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080032#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/list.h>
Marcel Holtmannbe9d1222005-11-08 09:57:38 -080044#include <linux/device.h>
Marcel Holtmannaef7d972010-03-21 05:27:45 +010045#include <linux/debugfs.h>
46#include <linux/seq_file.h>
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -030047#include <linux/uaccess.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030048#include <linux/crc16.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#include <linux/math64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <net/sock.h>
51
52#include <asm/system.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
Anderson Brigliaea370122011-06-07 18:46:31 -030058#include <net/bluetooth/smp.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059#include <net/bluetooth/amp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020061int disable_ertm;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062int enable_reconfig;
Marcel Holtmannf0709e02007-10-20 13:38:51 +020063
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -070064static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_A2MP, };
Linus Torvalds1da177e2005-04-16 15:20:36 -070066
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067struct workqueue_struct *_l2cap_wq;
68
69struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71};
72
73static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
74 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
75static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
76 struct l2cap_pinfo *pi, u16 icid, u16 result);
77static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
78 u16 icid, u16 result);
79
80static void l2cap_amp_move_setup(struct sock *sk);
81static void l2cap_amp_move_success(struct sock *sk);
82static void l2cap_amp_move_revert(struct sock *sk);
83
84static int l2cap_ertm_rx_queued_iframes(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085
Linus Torvalds1da177e2005-04-16 15:20:36 -070086static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
87 u8 code, u8 ident, u16 dlen, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070088static int l2cap_answer_move_poll(struct sock *sk);
89static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
90static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
91static void l2cap_chan_ready(struct sock *sk);
92static void l2cap_conn_del(struct hci_conn *hcon, int err);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -030093
Marcel Holtmann01394182006-07-03 10:02:46 +020094/* ---- L2CAP channels ---- */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
Gustavo F. Padovan4a6aa522011-05-17 14:34:52 -030096{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 struct sock *s;
98 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
99 if (l2cap_pi(s)->dcid == cid)
100 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200101 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700102 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200103}
104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700105/* Find channel with given DCID.
106 * Returns locked socket */
107static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
108 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200109{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700110 struct sock *s;
111 read_lock(&l->lock);
112 s = __l2cap_get_chan_by_dcid(l, cid);
113 if (s)
114 bh_lock_sock(s);
115 read_unlock(&l->lock);
116 return s;
117}
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120{
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->scid == cid)
124 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200125 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200127}
128
129/* Find channel with given SCID.
130 * Returns locked socket */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700131static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200132{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700133 struct sock *s;
134 read_lock(&l->lock);
135 s = __l2cap_get_chan_by_scid(l, cid);
136 if (s)
137 bh_lock_sock(s);
138 read_unlock(&l->lock);
139 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200140}
141
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200143{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
147 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200148 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149 return s;
150}
151
152static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153{
154 struct sock *s;
155 read_lock(&l->lock);
156 s = __l2cap_get_chan_by_ident(l, ident);
157 if (s)
158 bh_lock_sock(s);
159 read_unlock(&l->lock);
160 return s;
161}
162
163static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
164 u16 seq)
165{
166 struct sk_buff *skb;
167
168 skb_queue_walk(head, skb) {
169 if (bt_cb(skb)->control.txseq == seq)
170 return skb;
171 }
172
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300173 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200174}
175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
Marcel Holtmann01394182006-07-03 10:02:46 +0200177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 u16 allocSize = 1;
179 int err = 0;
180 int i;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300181
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182 /* Actual allocated size must be a power of 2 */
183 while (allocSize && allocSize <= size)
184 allocSize <<= 1;
185 if (!allocSize)
186 return -ENOMEM;
Marcel Holtmann01394182006-07-03 10:02:46 +0200187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188 seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
189 if (!seq_list->list)
190 return -ENOMEM;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700192 seq_list->size = allocSize;
193 seq_list->mask = allocSize - 1;
194 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
195 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
196 for (i = 0; i < allocSize; i++)
197 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300198
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300199 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300200}
201
/* Release the backing array of a sequence list.  kfree(NULL) is a
 * no-op, so calling this on a never-initialized list is safe. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
206
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
208 u16 seq)
209{
210 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
211}
212
/* Unlink sequence number seq from the list and return it.
 *
 * The list is a singly linked chain threaded through the slot array:
 * list[x & mask] holds the successor of x, L2CAP_SEQ_LIST_TAIL marks
 * the last element and L2CAP_SEQ_LIST_CLEAR an unused slot.
 *
 * Returns L2CAP_SEQ_LIST_CLEAR when the list is empty or seq is not
 * present; otherwise returns seq with its slot cleared and the chain
 * re-linked around it.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		BT_DBG("List empty");
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed quickly */
		BT_DBG("Remove head");
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* If the removed head was also the tail, the list is empty */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Non-head item must be found first */
		u16 prev = seq_list->head;
		BT_DBG("Find and remove");
		/* Walk the chain until prev is the predecessor of seq */
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL) {
				BT_DBG("seq %d not in list", (int) seq);
				return L2CAP_SEQ_LIST_CLEAR;
			}
		}

		/* Splice seq out of the chain and clear its slot */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
252
/* Pop and return the first sequence number on the list; returns
 * L2CAP_SEQ_LIST_CLEAR when the list is empty (handled by _remove). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
257
258static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
259{
260 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
261 u16 i;
262 for (i = 0; i < seq_list->size; i++)
263 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
264
265 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
266 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
267 }
268}
269
270static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
271{
272 u16 mask = seq_list->mask;
273
274 BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
275
276 if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
277 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
278 seq_list->head = seq;
279 else
280 seq_list->list[seq_list->tail & mask] = seq;
281
282 seq_list->tail = seq;
283 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
284 }
285}
286
287static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
288{
289 u16 packed;
290
291 packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
292 L2CAP_CTRL_REQSEQ;
293 packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
294 L2CAP_CTRL_FINAL;
295
296 if (control->frame_type == 's') {
297 packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
298 L2CAP_CTRL_POLL;
299 packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
300 L2CAP_CTRL_SUPERVISE;
301 packed |= L2CAP_CTRL_FRAME_TYPE;
302 } else {
303 packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
304 L2CAP_CTRL_SAR;
305 packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
306 L2CAP_CTRL_TXSEQ;
307 }
308
309 return packed;
310}
311
/* Decode a 16-bit enhanced control field into *control.  The fields
 * that do not apply to the decoded frame type are zeroed so callers
 * never see stale values. */
static void __get_enhanced_control(u16 enhanced,
				struct bt_l2cap_control *control)
{
	control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
		L2CAP_CTRL_REQSEQ_SHIFT;
	control->final = (enhanced & L2CAP_CTRL_FINAL) >>
		L2CAP_CTRL_FINAL_SHIFT;

	if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
		/* S-frame: supervisory, no payload sequence fields */
		control->frame_type = 's';
		control->poll = (enhanced & L2CAP_CTRL_POLL) >>
			L2CAP_CTRL_POLL_SHIFT;
		control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
			L2CAP_CTRL_SUPERVISE_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-frame: information frame with SAR and TX sequence */
		control->frame_type = 'i';
		control->sar = (enhanced & L2CAP_CTRL_SAR) >>
			L2CAP_CTRL_SAR_SHIFT;
		control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
			L2CAP_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
340
341static u32 __pack_extended_control(struct bt_l2cap_control *control)
342{
343 u32 packed;
344
345 packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
346 L2CAP_EXT_CTRL_REQSEQ;
347 packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
348 L2CAP_EXT_CTRL_FINAL;
349
350 if (control->frame_type == 's') {
351 packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
352 L2CAP_EXT_CTRL_POLL;
353 packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
354 L2CAP_EXT_CTRL_SUPERVISE;
355 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
356 } else {
357 packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
358 L2CAP_EXT_CTRL_SAR;
359 packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
360 L2CAP_EXT_CTRL_TXSEQ;
361 }
362
363 return packed;
364}
365
/* Decode a 32-bit extended control field into *control; fields that do
 * not apply to the decoded frame type are zeroed. */
static void __get_extended_control(u32 extended,
				struct bt_l2cap_control *control)
{
	control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
		L2CAP_EXT_CTRL_REQSEQ_SHIFT;
	control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
		L2CAP_EXT_CTRL_FINAL_SHIFT;

	if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
		/* S-frame */
		control->frame_type = 's';
		control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
			L2CAP_EXT_CTRL_POLL_SHIFT;
		control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
			L2CAP_EXT_CTRL_SUPERVISE_SHIFT;

		control->sar = 0;
		control->txseq = 0;
	} else {
		/* I-frame */
		control->frame_type = 'i';
		control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
			L2CAP_EXT_CTRL_SAR_SHIFT;
		control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
			L2CAP_EXT_CTRL_TXSEQ_SHIFT;

		control->poll = 0;
		control->super = 0;
	}
}
394
/* Cancel a pending delayed-ACK work item, if any. */
static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->ack_work);
}
400
/* Schedule the delayed-ACK work unless one is already pending; an
 * already-queued ACK keeps its original expiry instead of being pushed
 * out. */
static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
	if (!delayed_work_pending(&pi->ack_work)) {
		queue_delayed_work(_l2cap_wq, &pi->ack_work,
				msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
	}
}
409
/* Cancel a pending ERTM retransmission work item, if any. */
static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->retrans_work);
}
415
/* (Re)start the ERTM retransmission timer.  Skipped entirely while the
 * monitor work is pending (the monitor timer supersedes it) or when no
 * retransmission timeout is configured; otherwise any queued instance
 * is cancelled first so the timeout restarts from now. */
static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
		__cancel_delayed_work(&pi->retrans_work);
		queue_delayed_work(_l2cap_wq, &pi->retrans_work,
			msecs_to_jiffies(pi->retrans_timeout));
	}
}
425
/* Cancel a pending ERTM monitor work item, if any. */
static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->monitor_work);
}
431
/* (Re)start the ERTM monitor timer.  The retransmission timer is
 * stopped first (the two are mutually exclusive) and any queued monitor
 * instance is cancelled so the timeout restarts from now; a zero
 * monitor_timeout disables the timer. */
static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	l2cap_ertm_stop_retrans_timer(pi);
	__cancel_delayed_work(&pi->monitor_work);
	if (pi->monitor_timeout) {
		queue_delayed_work(_l2cap_wq, &pi->monitor_work,
				msecs_to_jiffies(pi->monitor_timeout));
	}
}
442
443static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
Marcel Holtmann01394182006-07-03 10:02:46 +0200444{
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300445 u16 cid = L2CAP_CID_DYN_START;
Marcel Holtmann01394182006-07-03 10:02:46 +0200446
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300447 for (; cid < L2CAP_CID_DYN_END; cid++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700448 if (!__l2cap_get_chan_by_scid(l, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200449 return cid;
450 }
451
452 return 0;
453}
454
/* Push sk onto the head of the connection's channel list, taking a
 * socket reference that l2cap_chan_unlink() releases.
 * NOTE(review): writes l->head without locking here, so the caller
 * presumably holds the list write lock — confirm. */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
466
/* Remove sk from the connection's doubly linked channel list under the
 * list write lock, then drop the reference taken by __l2cap_chan_link().
 */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
483
/* Attach socket sk to connection conn and assign its channel
 * identifiers according to the socket and link type, then link it into
 * the connection's channel list.  Fixed channels keep their
 * pre-assigned CIDs and are configured later (see comment below). */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason: 0x13 is the HCI "Remote User
	 * Terminated Connection" error code. */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (!l2cap_pi(sk)->fixed_channel &&
		(sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: single fixed data CID, and the MTUs
			 * are raised to at least the LE default */
			if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
			if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;

			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else if (sk->sk_type == SOCK_RAW) {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}
	/* Otherwise, do not set scid/dcid/omtu. These will be set up
	 * by l2cap_fixed_channel_config()
	 */

	__l2cap_chan_link(l, sk);
}
528
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Tears the channel down in order: stop the socket timer, unlink from
 * the connection, release any AMP controller state, mark the socket
 * closed (with err propagated to sk_err if nonzero), notify the parent
 * or the socket itself, then purge queues and cancel ERTM work. */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Fixed channels do not hold their own hcon reference */
		if (!l2cap_pi(sk)->fixed_channel)
			hci_conn_put(conn->hcon);
	}

	/* Detach from any AMP controller; if the AMP channel is still
	 * referenced elsewhere, remove this channel's logical link from
	 * the aggregate first. */
	if (l2cap_pi(sk)->ampcon) {
		l2cap_pi(sk)->ampcon->l2cap_data = NULL;
		l2cap_pi(sk)->ampcon = NULL;
		if (l2cap_pi(sk)->ampchan) {
			hci_chan_put(l2cap_pi(sk)->ampchan);
			if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
				l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
		}
		l2cap_pi(sk)->ampchan = NULL;
		l2cap_pi(sk)->amp_id = 0;
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending accept(): detach and wake the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM keeps extra per-channel state: partial SDU, SREJ queue
	 * and three delayed work items — all released/cancelled here. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		if (l2cap_pi(sk)->sdu)
			kfree_skb(l2cap_pi(sk)->sdu);

		skb_queue_purge(SREJ_QUEUE(sk));

		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}
}
586
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700587static inline u8 l2cap_get_auth_type(struct sock *sk)
Gustavo F. Padovan6e9e43f2011-04-28 17:55:53 -0300588{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700589 if (sk->sk_type == SOCK_RAW) {
590 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530591 case BT_SECURITY_HIGH:
592 return HCI_AT_DEDICATED_BONDING_MITM;
593 case BT_SECURITY_MEDIUM:
594 return HCI_AT_DEDICATED_BONDING;
595 default:
596 return HCI_AT_NO_BONDING;
597 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700598 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
599 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
600 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
Johan Hedberg8556edd32011-01-19 12:06:50 +0530601
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700602 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
Johan Hedberg8556edd32011-01-19 12:06:50 +0530603 return HCI_AT_NO_BONDING_MITM;
604 else
605 return HCI_AT_NO_BONDING;
606 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700607 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530608 case BT_SECURITY_HIGH:
609 return HCI_AT_GENERAL_BONDING_MITM;
610 case BT_SECURITY_MEDIUM:
611 return HCI_AT_GENERAL_BONDING;
612 default:
613 return HCI_AT_NO_BONDING;
614 }
615 }
616}
617
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200618/* Service level security */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700619static inline int l2cap_check_security(struct sock *sk)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200620{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700621 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100622 __u8 auth_type;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200623
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700624 auth_type = l2cap_get_auth_type(sk);
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100625
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700626 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
627 auth_type);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200628}
629
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range under conn->lock. */
u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
651
/* Compute the CRC-16 FCS over an outgoing frame and write it into the
 * last two bytes of the frame's final fragment.
 *
 * For a fragmented skb the CRC covers the head plus every fragment,
 * excluding the trailing L2CAP_FCS_SIZE bytes of the last fragment,
 * which are the FCS field itself. */
static void apply_fcs(struct sk_buff *skb)
{
	size_t len;
	u16 partial_crc;
	struct sk_buff *iter;
	struct sk_buff *final_frag = skb;

	/* Head data only when fragments follow; otherwise the whole skb
	 * minus the FCS field */
	if (skb_has_frag_list(skb))
		len = skb_headlen(skb);
	else
		len = skb->len - L2CAP_FCS_SIZE;

	partial_crc = crc16(0, (u8 *) skb->data, len);

	skb_walk_frags(skb, iter) {
		len = iter->len;
		/* Last fragment: exclude the FCS field from the CRC */
		if (!iter->next)
			len -= L2CAP_FCS_SIZE;

		partial_crc = crc16(partial_crc, iter->data, len);
		final_frag = iter;
	}

	put_unaligned_le16(partial_crc,
		final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
}
678
/* Build and transmit an L2CAP signalling command on conn.  Silently
 * returns if the command skb cannot be built; uses a non-flushable ACL
 * start packet when the controller supports it. */
void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic keeps the link in active mode */
	bt_cb(skb)->force_active = 1;

	hci_send_acl(conn->hcon, NULL, skb, flags);
}
698
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700699static inline int __l2cap_no_conn_pending(struct sock *sk)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300700{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700701 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300702}
703
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700704static void l2cap_send_conn_req(struct sock *sk)
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300705{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700706 struct l2cap_conn_req req;
707 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
708 req.psm = l2cap_pi(sk)->psm;
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300709
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700710 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
Gustavo F. Padovan2ab25cd2009-10-03 02:34:40 -0300711
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700712 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
713 L2CAP_CONN_REQ, sizeof(req), &req);
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300714}
715
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700716static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300717{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700718 struct l2cap_create_chan_req req;
719 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
720 req.psm = l2cap_pi(sk)->psm;
721 req.amp_id = amp_id;
722
723 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
724 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
725
726 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
727 L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300728}
729
/* Begin channel establishment on an existing connection.
 *
 * If the peer's feature mask has already been requested, wait for it to
 * complete, then (subject to security) send either an AMP physical link
 * creation or a plain Connect Request depending on the channel's AMP
 * policy.  Otherwise kick off the feature-mask information exchange
 * first; channel setup resumes when the response arrives. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange still in flight: connect is deferred */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			if (l2cap_pi(sk)->amp_pref == BT_AMP_POLICY_PREFER_AMP)
				amp_create_physical(l2cap_pi(sk)->conn, sk);
			else
				l2cap_send_conn_req(sk);
		}
	} else {
		/* First channel on this connection: request the peer's
		 * feature mask and arm the info timeout */
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
760
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300761static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
762{
763 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -0300764 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300765 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
766
767 switch (mode) {
768 case L2CAP_MODE_ERTM:
769 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
770 case L2CAP_MODE_STREAMING:
771 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
772 default:
773 return 0x00;
774 }
775}
776
/* Send an L2CAP disconnect request for the channel backing @sk.
 *
 * Purges any pending transmit data (and, in ERTM mode, the SREJ queue
 * and the ack/retransmit/monitor delayed work) before sending the
 * request, then moves the socket to BT_DISCONN and records @err as the
 * socket error for userspace.
 */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* Nothing queued should go out once we decided to disconnect. */
	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));

		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
802
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200804static void l2cap_conn_start(struct l2cap_conn *conn)
805{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700806 struct l2cap_chan_list *l = &conn->chan_list;
807 struct sock_del_list del, *tmp1, *tmp2;
808 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200809
810 BT_DBG("conn %p", conn);
811
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700812 INIT_LIST_HEAD(&del.list);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200813
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700814 read_lock(&l->lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300815
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700816 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200817 bh_lock_sock(sk);
818
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700819 if (sk->sk_type != SOCK_SEQPACKET &&
820 sk->sk_type != SOCK_STREAM) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200821 bh_unlock_sock(sk);
822 continue;
823 }
824
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700825 if (sk->sk_state == BT_CONNECT) {
826 if (!l2cap_check_security(sk) ||
827 !__l2cap_no_conn_pending(sk)) {
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300828 bh_unlock_sock(sk);
829 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +0200830 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300831
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700832 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
833 conn->feat_mask)
834 && l2cap_pi(sk)->conf_state &
835 L2CAP_CONF_STATE2_DEVICE) {
836 tmp1 = kzalloc(sizeof(struct sock_del_list),
837 GFP_ATOMIC);
838 tmp1->sk = sk;
839 list_add_tail(&tmp1->list, &del.list);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300840 bh_unlock_sock(sk);
841 continue;
842 }
843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700844 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846 if (l2cap_pi(sk)->amp_pref == BT_AMP_POLICY_PREFER_AMP)
847 amp_create_physical(l2cap_pi(sk)->conn, sk);
848 else
849 l2cap_send_conn_req(sk);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300850
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851 } else if (sk->sk_state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200852 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300853 char buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
855 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200856
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700857 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100858 if (bt_sk(sk)->defer_setup) {
859 struct sock *parent = bt_sk(sk)->parent;
860 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
861 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Nick Pellyf86e4b02010-04-08 16:23:32 -0700862 if (parent)
863 parent->sk_data_ready(parent, 0);
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100864
865 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700866 sk->sk_state = BT_CONFIG;
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100867 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
868 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
869 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200870 } else {
871 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
872 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
873 }
874
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700875 if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
876 l2cap_pi(sk)->amp_id) {
877 amp_accept_physical(conn,
878 l2cap_pi(sk)->amp_id, sk);
879 bh_unlock_sock(sk);
880 continue;
881 }
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300882
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700883 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
884 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
885
886 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300887 rsp.result != L2CAP_CR_SUCCESS) {
888 bh_unlock_sock(sk);
889 continue;
890 }
891
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700892 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300893 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700894 l2cap_build_conf_req(sk, buf), buf);
895 l2cap_pi(sk)->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200896 }
897
898 bh_unlock_sock(sk);
899 }
900
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700901 read_unlock(&l->lock);
902
903 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
904 bh_lock_sock(tmp1->sk);
905 __l2cap_sock_close(tmp1->sk, ECONNRESET);
906 bh_unlock_sock(tmp1->sk);
907 list_del(&tmp1->list);
908 kfree(tmp1);
909 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200910}
911
/* Find socket with cid and source bdaddr.
 * Returns closest match, locked.
 *
 * An exact match on (scid, src) wins; otherwise the last socket bound
 * to BDADDR_ANY with a matching scid is returned.  @state of 0 matches
 * any socket state.
 */
static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->scid == cid) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only if the loop broke on an exact match. */
	return node ? sk : sk1;
}
941
/* Handle an incoming LE connection: if a socket is listening on the LE
 * data CID, allocate a child socket for the new link, attach it to the
 * connection's channel list, mark it connected and wake the listener.
 * Drops the new connection silently when no listener exists or the
 * accept backlog is full.
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
							conn->src);
	if (!parent)
		return;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	/* Hold the ACL while the child channel references it. */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

	/* Success path deliberately falls through: both paths must
	 * release the parent lock.
	 */
clean:
	bh_unlock_sock(parent);
}
989
/* Called when the underlying link is fully established: notify every
 * channel on @conn.  LE channels first pass through SMP security;
 * raw (non-SEQPACKET/STREAM) sockets become connected immediately;
 * connection-oriented sockets in BT_CONNECT continue via
 * l2cap_do_start().  An LE link with no channels yet still triggers
 * SMP pairing at high security.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	/* Incoming LE links may need a child socket created first. */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	if (l->head) {
		for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
			bh_lock_sock(sk);

			if (conn->hcon->type == LE_LINK) {
				if (smp_conn_security(conn,
						l2cap_pi(sk)->sec_level))
					l2cap_chan_ready(sk);

			} else if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Raw sockets have no config phase. */
				l2cap_sock_clear_timer(sk);
				sk->sk_state = BT_CONNECTED;
				sk->sk_state_change(sk);
			} else if (sk->sk_state == BT_CONNECT)
				l2cap_do_start(sk);

			bh_unlock_sock(sk);
		}
	} else if (conn->hcon->type == LE_LINK) {
		smp_conn_security(conn, BT_SECURITY_HIGH);
	}

	read_unlock(&l->lock);
}
1027
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001028/* Notify sockets that we cannot guaranty reliability anymore */
1029static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1030{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001031 struct l2cap_chan_list *l = &conn->chan_list;
1032 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001033
1034 BT_DBG("conn %p", conn);
1035
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001036 read_lock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001037
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001038 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1039 if (l2cap_pi(sk)->force_reliable)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001040 sk->sk_err = err;
1041 }
1042
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001043 read_unlock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001044}
1045
1046static void l2cap_info_timeout(unsigned long arg)
1047{
1048 struct l2cap_conn *conn = (void *) arg;
1049
Marcel Holtmann984947d2009-02-06 23:35:19 +01001050 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001051 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001052
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001053 l2cap_conn_start(conn);
1054}
1055
Vinicius Costa Gomesb19d5ce2011-06-14 13:37:41 -03001056static void security_timeout(unsigned long arg)
1057{
1058 struct l2cap_conn *conn = (void *) arg;
1059
1060 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1061}
1062
/* Allocate and attach an l2cap_conn to @hcon, or return the existing
 * one.  Returns NULL on allocation failure, and the existing (possibly
 * NULL) conn unchanged when @status is non-zero.
 *
 * The MTU comes from the controller's LE or ACL buffer size depending
 * on link type; LE links arm the SMP security timer, BR/EDR links the
 * information-request timer.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Already attached, or the link setup failed: nothing to do. */
	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	if (hcon->type == LE_LINK)
		setup_timer(&conn->security_timer, security_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* 0x13 = remote user terminated connection (default disc reason) */
	conn->disc_reason = 0x13;

	return conn;
}
1103
/* Tear down L2CAP state associated with @hcon, reporting @err to each
 * affected channel.  A channel is killed when @hcon is either its
 * BR/EDR link (conn->hcon) or its AMP link (ampcon); the l2cap_conn
 * itself is only freed when @hcon is the BR/EDR link it hangs off.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	struct sock *next;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	if ((conn->hcon == hcon) && (conn->rx_skb))
		kfree_skb(conn->rx_skb);

	BT_DBG("conn->hcon %p", conn->hcon);

	/* Kill channels */
	for (sk = conn->chan_list.head; sk; ) {
		BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
		if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
			/* l2cap_chan_del unlinks sk, so fetch next first. */
			next = l2cap_pi(sk)->next_c;
			bh_lock_sock(sk);
			l2cap_chan_del(sk, err);
			bh_unlock_sock(sk);
			l2cap_sock_kill(sk);
			sk = next;
		} else
			sk = l2cap_pi(sk)->next_c;
	}

	if (conn->hcon == hcon) {
		if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
			del_timer_sync(&conn->info_timer);

		/* NOTE(review): only the info timer is cancelled here; an
		 * LE connection's security_timer (armed in l2cap_conn_add)
		 * is not deleted before kfree — verify it is stopped
		 * elsewhere, otherwise a late expiry is a use-after-free.
		 */
		hcon->l2cap_data = NULL;

		kfree(conn);
	}
}
1143
/* Link @sk into @conn's channel list under the list write lock. */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk);
	write_unlock_bh(&l->lock);
}
1151
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 *
 * Exact (psm, src) match wins; otherwise the last matching socket
 * bound to BDADDR_ANY is returned.  @state of 0 matches any state.
 */
static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
{
	struct sock *sk = NULL, *sk1 = NULL;
	struct hlist_node *node;

	read_lock(&l2cap_sk_list.lock);

	sk_for_each(sk, node, &l2cap_sk_list.head) {
		if (state && sk->sk_state != state)
			continue;

		if (l2cap_pi(sk)->psm == psm) {
			/* Exact match. */
			if (!bacmp(&bt_sk(sk)->src, src))
				break;

			/* Closest match */
			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
				sk1 = sk;
		}
	}

	read_unlock(&l2cap_sk_list.lock);

	/* node is non-NULL only when the loop broke on an exact match. */
	return node ? sk : sk1;
}
1183
/* Establish the outgoing connection for @sk.
 *
 * Resolves a route, then either reuses an existing ACL (fixed
 * channels) or creates an LE/ACL link via hci_connect() depending on
 * the destination CID.  The socket is attached to the l2cap_conn and
 * moved to BT_CONNECTED directly (fixed channel or already-up LE
 * link) or to BT_CONNECT with further setup via l2cap_do_start().
 *
 * Returns 0 on success or a negative errno (-EHOSTUNREACH when no
 * route, -ENOTCONN when a fixed channel has no ACL, -ENOMEM, or the
 * hci_connect() error).
 */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	if (l2cap_pi(sk)->fixed_channel) {
		/* Fixed channels piggyback on existing ACL connections */
		hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
		if (!hcon || !hcon->l2cap_data) {
			err = -ENOTCONN;
			goto done;
		}

		conn = hcon->l2cap_data;
	} else {
		/* LE data CID selects an LE link, anything else ACL. */
		if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
			hcon = hci_connect(hdev, LE_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);
		else
			hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);

		if (IS_ERR(hcon)) {
			err = PTR_ERR(hcon);
			goto done;
		}

		conn = l2cap_conn_add(hcon, 0);
		if (!conn) {
			hci_conn_put(hcon);
			err = -ENOMEM;
			goto done;
		}
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk);

	if ((l2cap_pi(sk)->fixed_channel) ||
			(l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
				hcon->state == BT_CONNECTED)) {
		/* No channel-level handshake needed. */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		sk->sk_state = BT_CONNECT;
		l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
		sk->sk_state_change(sk);

		if (hcon->state == BT_CONNECTED) {
			if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Raw socket: connected once security OK. */
				l2cap_sock_clear_timer(sk);
				if (l2cap_check_security(sk)) {
					sk->sk_state = BT_CONNECTED;
					sk->sk_state_change(sk);
				}
			} else
				l2cap_do_start(sk);
		}
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1270
/* Block (interruptibly) until all ERTM frames on @sk are acknowledged
 * and drained from the local transmit queue, the connection goes away,
 * a signal arrives, or a socket error is raised.
 *
 * Caller holds the socket lock; it is released around each
 * schedule_timeout() so the receive path can make progress.
 * Returns 0 on success, sock_intr_errno() on signal, or the socket
 * error.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
		atomic_read(&l2cap_pi(sk)->ertm_queued)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* Re-arm polling interval after a full timeout. */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1302
/* Workqueue handler (pi->tx_work): retry ERTM transmission once the
 * in-flight count drops (see l2cap_skb_destructor).  The l2cap_pinfo
 * is the first member of the socket, hence the direct cast.
 */
static void l2cap_ertm_tx_worker(struct work_struct *work)
{
	struct l2cap_pinfo *pi =
		container_of(work, struct l2cap_pinfo, tx_work);
	struct sock *sk = (struct sock *)pi;
	BT_DBG("%p", pi);

	lock_sock(sk);
	l2cap_ertm_send(sk);
	release_sock(sk);
}
1314
/* skb destructor for cloned ERTM frames: decrement the per-socket
 * in-flight counter and, once it drops below the low-water mark,
 * schedule the tx worker to push more frames.
 */
static void l2cap_skb_destructor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	int queued;

	queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
	if (queued < L2CAP_MIN_ERTM_QUEUED)
		queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);
}
1324
/* Transmit @skb for channel @sk, routing to the AMP controller when an
 * AMP link is active (and its move state allows sending), else over
 * the BR/EDR ACL.  Consumes the skb in all cases (an AMP link without
 * a logical channel drops it).
 */
void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
			pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
		BT_DBG("Sending on AMP connection %p %p",
			pi->ampcon, pi->ampchan);
		if (pi->ampchan)
			hci_send_acl(pi->ampcon, pi->ampchan, skb,
					ACL_COMPLETE);
		else
			/* No logical channel yet: drop rather than queue. */
			kfree_skb(skb);
	} else {
		u16 flags;

		bt_cb(skb)->force_active = pi->force_active;
		BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);

		/* Non-flushable packets only when the controller supports
		 * the no-flush variant and the channel isn't flushable.
		 */
		if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
				!l2cap_pi(sk)->flushable)
			flags = ACL_START_NO_FLUSH;
		else
			flags = ACL_START;

		hci_send_acl(pi->conn->hcon, NULL, skb, flags);
	}
}
1355
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001356int l2cap_ertm_send(struct sock *sk)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03001357{
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03001358 struct sk_buff *skb, *tx_skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001359 struct l2cap_pinfo *pi = l2cap_pi(sk);
1360 struct bt_l2cap_control *control;
1361 int sent = 0;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03001362
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001363 BT_DBG("sk %p", sk);
Gustavo F. Padovanf11d6762010-05-01 16:15:44 -03001364
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001365 if (sk->sk_state != BT_CONNECTED)
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001366 return -ENOTCONN;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001367
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001368 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
1369 return 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001370
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001371 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
1372 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
1373 return 0;
1374
1375 while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
1376 atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
1377 (pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {
1378
1379 skb = sk->sk_send_head;
1380
1381 bt_cb(skb)->retries = 1;
1382 control = &bt_cb(skb)->control;
1383
1384 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1385 control->final = 1;
1386 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1387 }
1388 control->reqseq = pi->buffer_seq;
1389 pi->last_acked_seq = pi->buffer_seq;
1390 control->txseq = pi->next_tx_seq;
1391
1392 if (pi->extended_control) {
1393 put_unaligned_le32(__pack_extended_control(control),
1394 skb->data + L2CAP_HDR_SIZE);
1395 } else {
1396 put_unaligned_le16(__pack_enhanced_control(control),
1397 skb->data + L2CAP_HDR_SIZE);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001398 }
1399
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001400 if (pi->fcs == L2CAP_FCS_CRC16)
1401 apply_fcs(skb);
1402
1403 /* Clone after data has been modified. Data is assumed to be
1404 read-only (for locking purposes) on cloned sk_buffs.
1405 */
Andrei Emeltchenkoe420aba2009-12-23 13:07:14 +02001406 tx_skb = skb_clone(skb, GFP_ATOMIC);
1407
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001408 tx_skb->sk = sk;
1409 tx_skb->destructor = l2cap_skb_destructor;
1410 atomic_inc(&pi->ertm_queued);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001411
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001412 l2cap_do_send(sk, tx_skb);
Gustavo F. Padovan95ffa972010-06-18 20:37:33 -03001413
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001414 BT_DBG("Sent txseq %d", (int)control->txseq);
Gustavo F. Padovane299c1c2011-06-10 21:28:49 -03001415
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001416 l2cap_ertm_start_retrans_timer(pi);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001417
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001418 pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
1419 pi->unacked_frames += 1;
1420 pi->frames_sent += 1;
1421 sent += 1;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001422
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001423 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1424 sk->sk_send_head = NULL;
1425 else
1426 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1427 }
1428
1429 BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
1430 (int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
1431 atomic_read(&pi->ertm_queued));
1432
1433 return sent;
1434}
1435
/* Streaming-mode transmit: splice @skbs onto the socket's TX queue and
 * send every queued frame immediately (no window, no retransmission).
 * Each frame gets reqseq 0 and the next txseq stamped into its control
 * field, plus FCS when configured.
 *
 * Returns 0, or -ENOTCONN when the socket is not connected; sending
 * is skipped entirely during an AMP channel move.
 */
int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p, skbs %p", sk, skbs);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));

	BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
	while (!skb_queue_empty(TX_QUEUE(sk))) {

		skb = skb_dequeue(TX_QUEUE(sk));

		BT_DBG("skb %p", skb);

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		BT_DBG("control %p", control);

		/* Streaming mode never acknowledges: reqseq is always 0. */
		control->reqseq = 0;
		control->txseq = pi->next_tx_seq;

		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		l2cap_do_send(sk, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->frames_sent += 1;
		sent += 1;
	}

	BT_DBG("Sent %d", sent);

	return 0;
}
1493
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001494static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001495{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001496 while (len > 0) {
1497 if (iv->iov_len) {
1498 int copy = min_t(unsigned int, len, iv->iov_len);
1499 memcpy(kdata, iv->iov_base, copy);
1500 len -= copy;
1501 kdata += copy;
1502 iv->iov_base += copy;
1503 iv->iov_len -= copy;
1504 }
1505 iv++;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001506 }
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001507
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001508 return 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001509}
1510
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001511static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
1512 int len, int count, struct sk_buff *skb,
1513 int reseg)
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03001514{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001515 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001516 struct sk_buff **frag;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001517 struct sk_buff *final;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001518 int err, sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001520 BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
1521 msg, (int)len, (int)count, skb);
1522
1523 if (!conn)
1524 return -ENOTCONN;
1525
1526 /* When resegmenting, data is copied from kernel space */
1527 if (reseg) {
1528 err = memcpy_fromkvec(skb_put(skb, count),
1529 (struct kvec *) msg->msg_iov, count);
1530 } else {
1531 err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
1532 count);
1533 }
1534
1535 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001536 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537
1538 sent += count;
1539 len -= count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001540 final = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541
1542 /* Continuation fragments (no L2CAP header) */
1543 frag = &skb_shinfo(skb)->frag_list;
1544 while (len) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001545 int skblen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 count = min_t(unsigned int, conn->mtu, len);
1547
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001548 /* Add room for the FCS if it fits */
1549 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
1550 len + L2CAP_FCS_SIZE <= conn->mtu)
1551 skblen = count + L2CAP_FCS_SIZE;
1552 else
1553 skblen = count;
1554
1555 /* Don't use bt_skb_send_alloc() while resegmenting, since
1556 * it is not ok to block.
1557 */
1558 if (reseg) {
1559 *frag = bt_skb_alloc(skblen, GFP_ATOMIC);
1560 if (*frag)
1561 skb_set_owner_w(*frag, sk);
1562 } else {
1563 *frag = bt_skb_send_alloc(sk, skblen,
1564 msg->msg_flags & MSG_DONTWAIT, &err);
1565 }
1566
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 if (!*frag)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001568 return -EFAULT;
1569
1570 /* When resegmenting, data is copied from kernel space */
1571 if (reseg) {
1572 err = memcpy_fromkvec(skb_put(*frag, count),
1573 (struct kvec *) msg->msg_iov,
1574 count);
1575 } else {
1576 err = memcpy_fromiovec(skb_put(*frag, count),
1577 msg->msg_iov, count);
1578 }
1579
1580 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001581 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
1583 sent += count;
1584 len -= count;
1585
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001586 final = *frag;
1587
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 frag = &(*frag)->next;
1589 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001591 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
1592 if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
1593 if (reseg) {
1594 *frag = bt_skb_alloc(L2CAP_FCS_SIZE,
1595 GFP_ATOMIC);
1596 if (*frag)
1597 skb_set_owner_w(*frag, sk);
1598 } else {
1599 *frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
1600 msg->msg_flags & MSG_DONTWAIT,
1601 &err);
1602 }
1603
1604 if (!*frag)
1605 return -EFAULT;
1606
1607 final = *frag;
1608 }
1609
1610 skb_put(final, L2CAP_FCS_SIZE);
1611 }
1612
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001614}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001616struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001617{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001618 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001619 struct sk_buff *skb;
1620 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1621 struct l2cap_hdr *lh;
1622
1623 BT_DBG("sk %p len %d", sk, (int)len);
1624
1625 count = min_t(unsigned int, (conn->mtu - hlen), len);
1626 skb = bt_skb_send_alloc(sk, count + hlen,
1627 msg->msg_flags & MSG_DONTWAIT, &err);
1628 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001629 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001630
1631 /* Create L2CAP header */
1632 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001633 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001634 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001635 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001636
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001637 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001638 if (unlikely(err < 0)) {
1639 kfree_skb(skb);
1640 return ERR_PTR(err);
1641 }
1642 return skb;
1643}
1644
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001645struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001646{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001647 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001648 struct sk_buff *skb;
1649 int err, count, hlen = L2CAP_HDR_SIZE;
1650 struct l2cap_hdr *lh;
1651
1652 BT_DBG("sk %p len %d", sk, (int)len);
1653
1654 count = min_t(unsigned int, (conn->mtu - hlen), len);
1655 skb = bt_skb_send_alloc(sk, count + hlen,
1656 msg->msg_flags & MSG_DONTWAIT, &err);
1657 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001658 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001659
1660 /* Create L2CAP header */
1661 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001662 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001663 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1664
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001665 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001666 if (unlikely(err < 0)) {
1667 kfree_skb(skb);
1668 return ERR_PTR(err);
1669 }
1670 return skb;
1671}
1672
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001673struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
1674 struct msghdr *msg, size_t len,
1675 u16 sdulen, int reseg)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001676{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001677 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 int err, count, hlen;
1679 int reserve = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001680 struct l2cap_hdr *lh;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001681 u8 fcs = l2cap_pi(sk)->fcs;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001682
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001683 if (l2cap_pi(sk)->extended_control)
1684 hlen = L2CAP_EXTENDED_HDR_SIZE;
1685 else
1686 hlen = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03001687
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001688 if (sdulen)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001689 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001690
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001691 if (fcs == L2CAP_FCS_CRC16)
1692 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001693
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001694 BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
1695 sk, msg, (int)len, (int)sdulen, hlen);
1696
1697 count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
1698
1699 /* Allocate extra headroom for Qualcomm PAL. This is only
1700 * necessary in two places (here and when creating sframes)
1701 * because only unfragmented iframes and sframes are sent
1702 * using AMP controllers.
1703 */
1704 if (l2cap_pi(sk)->ampcon &&
1705 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1706 reserve = BT_SKB_RESERVE_80211;
1707
1708 /* Don't use bt_skb_send_alloc() while resegmenting, since
1709 * it is not ok to block.
1710 */
1711 if (reseg) {
1712 skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
1713 if (skb)
1714 skb_set_owner_w(skb, sk);
1715 } else {
1716 skb = bt_skb_send_alloc(sk, count + hlen + reserve,
1717 msg->msg_flags & MSG_DONTWAIT, &err);
1718 }
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001719 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001720 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001721
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001722 if (reserve)
1723 skb_reserve(skb, reserve);
1724
1725 bt_cb(skb)->control.fcs = fcs;
1726
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001727 /* Create L2CAP header */
1728 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001729 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1730 lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001731
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001732 /* Control header is populated later */
1733 if (l2cap_pi(sk)->extended_control)
1734 put_unaligned_le32(0, skb_put(skb, 4));
1735 else
1736 put_unaligned_le16(0, skb_put(skb, 2));
1737
1738 if (sdulen)
1739 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1740
1741 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001742 if (unlikely(err < 0)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001743 BT_DBG("err %d", err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001744 kfree_skb(skb);
1745 return ERR_PTR(err);
1746 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001747
1748 bt_cb(skb)->retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001749 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750}
1751
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001752static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001753{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001754 struct l2cap_pinfo *pi;
1755 struct sk_buff *acked_skb;
1756 u16 ackseq;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001757
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001758 BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001759
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001760 pi = l2cap_pi(sk);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001761
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001762 if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
1763 return;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001764
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001765 BT_DBG("expected_ack_seq %d, unacked_frames %d",
1766 (int) pi->expected_ack_seq, (int) pi->unacked_frames);
1767
1768 for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
1769 ackseq = __next_seq(ackseq, pi)) {
1770
1771 acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
1772 if (acked_skb) {
1773 skb_unlink(acked_skb, TX_QUEUE(sk));
1774 kfree_skb(acked_skb);
1775 pi->unacked_frames--;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001776 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001777 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001778
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001779 pi->expected_ack_seq = reqseq;
1780
1781 if (pi->unacked_frames == 0)
1782 l2cap_ertm_stop_retrans_timer(pi);
1783
1784 BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001785}
1786
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001787static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001788{
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001789 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001790 int len;
1791 int reserve = 0;
1792 struct l2cap_hdr *lh;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001793
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001794 if (l2cap_pi(sk)->extended_control)
1795 len = L2CAP_EXTENDED_HDR_SIZE;
1796 else
1797 len = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001798
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001799 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1800 len += L2CAP_FCS_SIZE;
1801
1802 /* Allocate extra headroom for Qualcomm PAL */
1803 if (l2cap_pi(sk)->ampcon &&
1804 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1805 reserve = BT_SKB_RESERVE_80211;
1806
1807 skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);
1808
1809 if (!skb)
1810 return ERR_PTR(-ENOMEM);
1811
1812 if (reserve)
1813 skb_reserve(skb, reserve);
1814
1815 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1816 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1817 lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);
1818
1819 if (l2cap_pi(sk)->extended_control)
1820 put_unaligned_le32(control, skb_put(skb, 4));
1821 else
1822 put_unaligned_le16(control, skb_put(skb, 2));
1823
1824 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1825 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1826 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001827 }
1828
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001829 return skb;
1830}
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001831
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001832static void l2cap_ertm_send_sframe(struct sock *sk,
1833 struct bt_l2cap_control *control)
1834{
1835 struct l2cap_pinfo *pi;
1836 struct sk_buff *skb;
1837 u32 control_field;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001838
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001839 BT_DBG("sk %p, control %p", sk, control);
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001840
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001841 if (control->frame_type != 's')
1842 return;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001844 pi = l2cap_pi(sk);
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001846 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
1847 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
1848 pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
1849 BT_DBG("AMP error - attempted S-Frame send during AMP move");
1850 return;
1851 }
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001852
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001853 if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
1854 control->final = 1;
1855 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1856 }
1857
1858 if (control->super == L2CAP_SFRAME_RR)
1859 pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
1860 else if (control->super == L2CAP_SFRAME_RNR)
1861 pi->conn_state |= L2CAP_CONN_SENT_RNR;
1862
1863 if (control->super != L2CAP_SFRAME_SREJ) {
1864 pi->last_acked_seq = control->reqseq;
1865 l2cap_ertm_stop_ack_timer(pi);
1866 }
1867
1868 BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
1869 (int) control->final, (int) control->poll,
1870 (int) control->super);
1871
1872 if (pi->extended_control)
1873 control_field = __pack_extended_control(control);
1874 else
1875 control_field = __pack_enhanced_control(control);
1876
1877 skb = l2cap_create_sframe_pdu(sk, control_field);
1878 if (!IS_ERR(skb))
1879 l2cap_do_send(sk, skb);
1880}
1881
/* Acknowledge received I-frames.  If the local receiver is busy, send
 * RNR immediately.  Otherwise try to piggyback the ack on pending
 * I-frames; failing that, send an explicit RR once enough frames are
 * unacked (3/4 of the tx window), or restart the ack timer so the ack
 * goes out later.
 */
static void l2cap_ertm_send_ack(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control control;
	/* Number of frames received but not yet acknowledged */
	u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
	int threshold;

	BT_DBG("sk %p", sk);
	BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
		(int)pi->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';

	if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
		pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
		l2cap_ertm_stop_ack_timer(pi);
		control.super = L2CAP_SFRAME_RNR;
		control.reqseq = pi->buffer_seq;
		l2cap_ertm_send_sframe(sk, &control);
	} else {
		if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
			l2cap_ertm_send(sk);
			/* If any i-frames were sent, they included an ack */
			if (pi->buffer_seq == pi->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = pi->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
			threshold);

		if (frames_to_ack >= threshold) {
			l2cap_ertm_stop_ack_timer(pi);
			control.super = L2CAP_SFRAME_RR;
			control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &control);
			frames_to_ack = 0;
		}

		/* Ack still owed: defer it via the ack timer */
		if (frames_to_ack)
			l2cap_ertm_start_ack_timer(pi);
	}
}
1932
1933static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
1934{
1935 struct l2cap_pinfo *pi;
1936 struct bt_l2cap_control control;
1937
1938 BT_DBG("sk %p, poll %d", sk, (int) poll);
1939
1940 pi = l2cap_pi(sk);
1941
1942 memset(&control, 0, sizeof(control));
1943 control.frame_type = 's';
1944 control.poll = poll;
1945
1946 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
1947 control.super = L2CAP_SFRAME_RNR;
1948 else
1949 control.super = L2CAP_SFRAME_RR;
1950
1951 control.reqseq = pi->buffer_seq;
1952 l2cap_ertm_send_sframe(sk, &control);
1953}
1954
/* Respond to a poll from the peer: guarantee that exactly one frame
 * carrying the F-bit goes out.  The F-bit may ride on an immediate RNR
 * (if locally busy), on a pending I-frame flushed by l2cap_ertm_send(),
 * or on a final RR if neither of those consumed it.
 */
static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.final = 1;
	control.reqseq = pi->buffer_seq;
	/* SEND_FBIT is cleared by l2cap_ertm_send_sframe() once a
	 * frame with the F-bit has actually been transmitted.
	 */
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control.super = L2CAP_SFRAME_RNR;
		l2cap_ertm_send_sframe(sk, &control);
	}

	/* Remote busy is being cleared below; restart retransmission
	 * for anything still outstanding.
	 */
	if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
		(pi->unacked_frames > 0))
		l2cap_ertm_start_retrans_timer(pi);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Send pending iframes */
	l2cap_ertm_send(sk);

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SFRAME_RR;
		l2cap_ertm_send_sframe(sk, &control);
	}
}
1992
1993static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
1994{
1995 struct bt_l2cap_control control;
1996 struct l2cap_pinfo *pi;
1997 u16 seq;
1998
1999 BT_DBG("sk %p, txseq %d", sk, (int)txseq);
2000
2001 pi = l2cap_pi(sk);
2002 memset(&control, 0, sizeof(control));
2003 control.frame_type = 's';
2004 control.super = L2CAP_SFRAME_SREJ;
2005
2006 for (seq = pi->expected_tx_seq; seq != txseq;
2007 seq = __next_seq(seq, pi)) {
2008 if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
2009 control.reqseq = seq;
2010 l2cap_ertm_send_sframe(sk, &control);
2011 l2cap_seq_list_append(&pi->srej_list, seq);
2012 }
2013 }
2014
2015 pi->expected_tx_seq = __next_seq(txseq, pi);
2016}
2017
2018static void l2cap_ertm_send_srej_tail(struct sock *sk)
2019{
2020 struct bt_l2cap_control control;
2021 struct l2cap_pinfo *pi;
2022
2023 BT_DBG("sk %p", sk);
2024
2025 pi = l2cap_pi(sk);
2026
2027 if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2028 return;
2029
2030 memset(&control, 0, sizeof(control));
2031 control.frame_type = 's';
2032 control.super = L2CAP_SFRAME_SREJ;
2033 control.reqseq = pi->srej_list.tail;
2034 l2cap_ertm_send_sframe(sk, &control);
2035}
2036
2037static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
2038{
2039 struct bt_l2cap_control control;
2040 struct l2cap_pinfo *pi;
2041 u16 initial_head;
2042 u16 seq;
2043
2044 BT_DBG("sk %p, txseq %d", sk, (int) txseq);
2045
2046 pi = l2cap_pi(sk);
2047 memset(&control, 0, sizeof(control));
2048 control.frame_type = 's';
2049 control.super = L2CAP_SFRAME_SREJ;
2050
2051 /* Capture initial list head to allow only one pass through the list. */
2052 initial_head = pi->srej_list.head;
2053
2054 do {
2055 seq = l2cap_seq_list_pop(&pi->srej_list);
2056 if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
2057 break;
2058
2059 control.reqseq = seq;
2060 l2cap_ertm_send_sframe(sk, &control);
2061 l2cap_seq_list_append(&pi->srej_list, seq);
2062 } while (pi->srej_list.head != initial_head);
2063}
2064
2065static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
2066{
2067 struct l2cap_pinfo *pi = l2cap_pi(sk);
2068 BT_DBG("sk %p", sk);
2069
2070 pi->expected_tx_seq = pi->buffer_seq;
2071 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
2072 skb_queue_purge(SREJ_QUEUE(sk));
2073 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2074}
2075
/* ERTM transmit state machine, XMIT state: normal data transfer.
 * Handles data requests, local-busy transitions (including deferred
 * AMP channel-move completion while busy), acknowledgements, and
 * poll/retransmission events that move the channel to WAIT_F.
 * Always returns 0 in the current implementation.
 */
static int l2cap_ertm_tx_state_xmit(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		/* Queue the new frames and transmit what the window allows */
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		l2cap_ertm_send(sk);
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* Announce the busy condition (RNR) to the peer */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		/* An AMP move that was waiting for local-busy to clear
		 * can now be confirmed (initiator) or answered
		 * (responder).
		 */
		if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
			if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
						pi->amp_move_cmd_ident,
						pi->dcid,
						L2CAP_MOVE_CHAN_SUCCESS);
			}
			break;
		}

		/* If we previously reported RNR, poll the peer to
		 * resynchronize and wait for the F-bit response.
		 */
		if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
			(pi->conn_state & L2CAP_CONN_SENT_RNR)) {
			struct bt_l2cap_control local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		/* Peer acknowledged frames up to control->reqseq */
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		l2cap_ertm_stop_ack_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
		/* No ack in time: poll the peer and await the F-bit */
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2174
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002175static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
2176 struct bt_l2cap_control *control,
2177 struct sk_buff_head *skbs, u8 event)
2178{
2179 struct l2cap_pinfo *pi;
2180 int err = 0;
2181
2182 BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
2183 (int)event);
2184 pi = l2cap_pi(sk);
2185
2186 switch (event) {
2187 case L2CAP_ERTM_EVENT_DATA_REQUEST:
2188 if (sk->sk_send_head == NULL)
2189 sk->sk_send_head = skb_peek(skbs);
2190 /* Queue data, but don't send. */
2191 skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
2192 break;
2193 case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
2194 BT_DBG("Enter LOCAL_BUSY");
2195 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2196
2197 if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
2198 /* The SREJ_SENT state must be aborted if we are to
2199 * enter the LOCAL_BUSY state.
2200 */
2201 l2cap_ertm_abort_rx_srej_sent(sk);
2202 }
2203
2204 l2cap_ertm_send_ack(sk);
2205
2206 break;
2207 case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
2208 BT_DBG("Exit LOCAL_BUSY");
2209 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2210
2211 if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
2212 struct bt_l2cap_control local_control;
2213 memset(&local_control, 0, sizeof(local_control));
2214 local_control.frame_type = 's';
2215 local_control.super = L2CAP_SFRAME_RR;
2216 local_control.poll = 1;
2217 local_control.reqseq = pi->buffer_seq;
2218 l2cap_ertm_send_sframe(sk, &local_control);
2219
2220 pi->retry_count = 1;
2221 l2cap_ertm_start_monitor_timer(pi);
2222 pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
2223 }
2224 break;
2225 case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
2226 l2cap_ertm_process_reqseq(sk, control->reqseq);
2227
2228 /* Fall through */
2229
2230 case L2CAP_ERTM_EVENT_RECV_FBIT:
2231 if (control && control->final) {
2232 l2cap_ertm_stop_monitor_timer(pi);
2233 if (pi->unacked_frames > 0)
2234 l2cap_ertm_start_retrans_timer(pi);
2235 pi->retry_count = 0;
2236 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2237 BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
2238 }
2239 break;
2240 case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
2241 /* Ignore */
2242 break;
2243 case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
2244 if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
2245 l2cap_ertm_send_rr_or_rnr(sk, 1);
2246 l2cap_ertm_start_monitor_timer(pi);
2247 pi->retry_count += 1;
2248 } else
2249 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
2250 break;
2251 default:
2252 break;
2253 }
2254
2255 return err;
2256}
2257
2258int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
2259 struct sk_buff_head *skbs, u8 event)
2260{
2261 struct l2cap_pinfo *pi;
2262 int err = 0;
2263
2264 BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
2265 sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
2266
2267 pi = l2cap_pi(sk);
2268
2269 switch (pi->tx_state) {
2270 case L2CAP_ERTM_TX_STATE_XMIT:
2271 err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
2272 break;
2273 case L2CAP_ERTM_TX_STATE_WAIT_F:
2274 err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
2275 break;
2276 default:
2277 /* Ignore event */
2278 break;
2279 }
2280
2281 return err;
2282}
2283
/* Split an outgoing SDU of 'len' bytes from msg into one or more ERTM
 * I-frame PDUs queued on seg_queue.  PDU size is bounded by the HCI
 * MTU, the BR/EDR radio payload limit (when no AMP controller is in
 * use) and the peer's MPS; the first PDU of a segmented SDU carries
 * the SDU length field.  'reseg' selects non-blocking allocation.
 * Returns 0 on success or a negative error (seg_queue is purged).
 */
int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
			struct msghdr *msg, size_t len, int reseg)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = l2cap_pi(sk)->conn->mtu;

	/* Constrain BR/EDR PDU size to fit within the largest radio packet */
	if (!l2cap_pi(sk)->ampcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU: no SDU length field */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len) {
		skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);

		BT_DBG("iframe skb %p", skb);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU has the SDU length field;
			 * later PDUs get that space back for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2352
2353static inline int is_initial_frame(u8 sar)
2354{
2355 return (sar == L2CAP_SAR_UNSEGMENTED ||
2356 sar == L2CAP_SAR_START);
2357}
2358
2359static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
2360 size_t veclen)
2361{
2362 struct sk_buff *frag_iter;
2363
2364 BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
2365
2366 if (iv->iov_len + skb->len > veclen)
2367 return -ENOMEM;
2368
2369 memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
2370 iv->iov_len += skb->len;
2371
2372 skb_walk_frags(skb, frag_iter) {
2373 if (iv->iov_len + skb->len > veclen)
2374 return -ENOMEM;
2375
2376 BT_DBG("Copying %d bytes", (int)frag_iter->len);
2377 memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
2378 frag_iter->len);
2379 iv->iov_len += frag_iter->len;
2380 }
2381
2382 return 0;
2383}
2384
/* Re-segment every SDU on @queue to match the channel's current PDU
 * parameters (used after an AMP channel move changes MTU/header format).
 *
 * Each SDU is reassembled from its queued PDUs into a flat bounce buffer,
 * then re-run through l2cap_segment_sdu() to produce new PDUs, which are
 * spliced back onto @queue in order.  On any error the whole queue is
 * purged and the error is returned; callers are expected to tear the
 * channel down in that case.
 *
 * Must be called with the socket locked.  Returns 0 on success or a
 * negative errno (-ENOMEM on buffer/segmentation failure).
 */
int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
{
	void *buf;
	int buflen;
	int err = 0;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iv;
	struct sk_buff_head old_frames;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (skb_queue_empty(queue))
		return 0;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = (struct iovec *) &iv;

	/* Bounce buffer large enough for a maximal SDU plus trailing FCS */
	buflen = pi->omtu + L2CAP_FCS_SIZE;
	buf = kzalloc(buflen, GFP_TEMPORARY);

	if (!buf) {
		BT_DBG("Could not allocate resegmentation buffer");
		return -ENOMEM;
	}

	/* Move current frames off the original queue */
	__skb_queue_head_init(&old_frames);
	skb_queue_splice_tail_init(queue, &old_frames);

	while (!skb_queue_empty(&old_frames)) {
		struct sk_buff_head current_sdu;
		u8 original_sar;

		/* Reassemble each SDU from one or more PDUs */

		iv.iov_base = buf;
		iv.iov_len = 0;

		skb = skb_peek(&old_frames);
		original_sar = bt_cb(skb)->control.sar;

		__skb_unlink(skb, &old_frames);

		/* Append data to SDU, stripping the per-PDU L2CAP header
		 * (its size depends on the control-field format in use).
		 */
		if (pi->extended_control)
			skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
		else
			skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

		/* Only the first ("start") PDU of a segmented SDU carries
		 * the SDU length field.
		 */
		if (original_sar == L2CAP_SAR_START)
			skb_pull(skb, L2CAP_SDULEN_SIZE);

		err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

		/* Drop the trailing FCS bytes from the reassembled data */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
			iv.iov_len -= L2CAP_FCS_SIZE;

		/* Free skb */
		kfree_skb(skb);

		if (err)
			break;

		/* Pull in continuation/end PDUs belonging to the same SDU */
		while (!skb_queue_empty(&old_frames) && !err) {
			/* Check next frame */
			skb = skb_peek(&old_frames);

			/* A start/unsegmented PDU begins the next SDU */
			if (is_initial_frame(bt_cb(skb)->control.sar))
				break;

			__skb_unlink(skb, &old_frames);

			/* Append data to SDU */
			if (pi->extended_control)
				skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
			else
				skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

			if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
				skb_pull(skb, L2CAP_SDULEN_SIZE);

			err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

			if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
				iv.iov_len -= L2CAP_FCS_SIZE;

			/* Free skb */
			kfree_skb(skb);
		}

		if (err)
			break;

		/* Segment data */

		__skb_queue_head_init(&current_sdu);

		/* skbs for the SDU were just freed, but the
		 * resegmenting process could produce more, smaller
		 * skbs due to smaller PDUs and reduced HCI MTU. The
		 * overhead from the sk_buff structs could put us over
		 * the sk_sndbuf limit.
		 *
		 * Since this code is running in response to a
		 * received poll/final packet, it cannot block.
		 * Therefore, memory allocation needs to be allowed by
		 * falling back to bt_skb_alloc() (with
		 * skb_set_owner_w() to maintain sk_wmem_alloc
		 * correctly).
		 */
		/* NOTE(review): msg_iovlen is set to the byte length of the
		 * single kvec rather than an iovec count; it is then passed
		 * as the length argument to l2cap_segment_sdu(), which looks
		 * intentional in this fork — confirm against that function.
		 */
		msg.msg_iovlen = iv.iov_len;
		err = l2cap_segment_sdu(sk, &current_sdu, &msg,
					msg.msg_iovlen, 1);

		if (err || skb_queue_empty(&current_sdu)) {
			BT_DBG("Error %d resegmenting data for socket %p",
				err, sk);
			__skb_queue_purge(&current_sdu);
			break;
		}

		/* Fix up first PDU SAR bits: if the original SDU was already
		 * partially transmitted, the re-segmented frames must not
		 * restart the SDU.
		 */
		if (!is_initial_frame(original_sar)) {
			BT_DBG("Changing SAR bits, %d PDUs",
				skb_queue_len(&current_sdu));
			skb = skb_peek(&current_sdu);

			if (skb_queue_len(&current_sdu) == 1) {
				/* Change SAR from 'unsegmented' to 'end' */
				bt_cb(skb)->control.sar = L2CAP_SAR_END;
			} else {
				struct l2cap_hdr *lh;
				size_t hdrlen;

				/* Change SAR from 'start' to 'continue' */
				bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;

				/* Start frames contain 2 bytes for
				 * sdulen and continue frames don't.
				 * Must rewrite header to eliminate
				 * sdulen and then adjust l2cap frame
				 * length.
				 */
				if (pi->extended_control)
					hdrlen = L2CAP_EXTENDED_HDR_SIZE;
				else
					hdrlen = L2CAP_ENHANCED_HDR_SIZE;

				memmove(skb->data + L2CAP_SDULEN_SIZE,
					skb->data, hdrlen);
				skb_pull(skb, L2CAP_SDULEN_SIZE);
				lh = (struct l2cap_hdr *)skb->data;
				lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
						L2CAP_SDULEN_SIZE);
			}
		}

		/* Add to queue */
		skb_queue_splice_tail(&current_sdu, queue);
	}

	__skb_queue_purge(&old_frames);
	if (err)
		__skb_queue_purge(queue);

	kfree(buf);

	BT_DBG("Queue resegmented, err=%d", err);
	return err;
}
2557
/* Workqueue handler that re-segments a channel's TX queue after an AMP
 * move.  The l2cap_resegment_work wrapper is freed immediately; the
 * embedded sock pointer is all that is needed.  Bails out (without
 * touching the queue) if the channel has already left the RESEGMENT
 * state by the time the work runs.  On resegmentation failure the
 * channel is disconnected; otherwise transmission resumes from the
 * head of the rebuilt queue.
 */
static void l2cap_resegment_worker(struct work_struct *work)
{
	int err = 0;
	struct l2cap_resegment_work *seg_work =
		container_of(work, struct l2cap_resegment_work, work);
	struct sock *sk = seg_work->sk;

	/* The wrapper was heap-allocated solely to carry sk here */
	kfree(seg_work);

	BT_DBG("sk %p", sk);
	lock_sock(sk);

	if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		release_sock(sk);
		return;
	}

	err = l2cap_resegment_queue(sk, TX_QUEUE(sk));

	l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;

	/* Old send position is meaningless for the rebuilt queue; restart
	 * from its head (or clear it if the queue ended up empty).
	 */
	if (skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));

	if (err)
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
	else
		l2cap_ertm_send(sk);

	release_sock(sk);
}
2591
2592static int l2cap_setup_resegment(struct sock *sk)
2593{
2594 struct l2cap_resegment_work *seg_work;
2595
2596 BT_DBG("sk %p", sk);
2597
2598 if (skb_queue_empty(TX_QUEUE(sk)))
2599 return 0;
2600
2601 seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
2602 if (!seg_work)
2603 return -ENOMEM;
2604
2605 INIT_WORK(&seg_work->work, l2cap_resegment_worker);
2606 seg_work->sk = sk;
2607
2608 if (!queue_work(_l2cap_wq, &seg_work->work)) {
2609 kfree(seg_work);
2610 return -ENOMEM;
2611 }
2612
2613 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
2614
2615 return 0;
2616}
2617
2618static inline int l2cap_rmem_available(struct sock *sk)
2619{
2620 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2621 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2622 return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
2623}
2624
2625static inline int l2cap_rmem_full(struct sock *sk)
2626{
2627 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2628 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2629 return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
2630}
2631
2632void l2cap_amp_move_init(struct sock *sk)
2633{
2634 BT_DBG("sk %p", sk);
2635
2636 if (!l2cap_pi(sk)->conn)
2637 return;
2638
2639 if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
2640 return;
2641
2642 if (l2cap_pi(sk)->amp_id == 0) {
2643 if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
2644 return;
2645 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2646 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
2647 amp_create_physical(l2cap_pi(sk)->conn, sk);
2648 } else {
2649 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2650 l2cap_pi(sk)->amp_move_state =
2651 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
2652 l2cap_pi(sk)->amp_move_id = 0;
2653 l2cap_amp_move_setup(sk);
2654 l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
2655 l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
2656 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
2657 }
2658}
2659
2660static void l2cap_chan_ready(struct sock *sk)
2661{
2662 struct sock *parent = bt_sk(sk)->parent;
2663
2664 BT_DBG("sk %p, parent %p", sk, parent);
2665
2666 l2cap_pi(sk)->conf_state = 0;
2667 l2cap_sock_clear_timer(sk);
2668
2669 if (!parent) {
2670 /* Outgoing channel.
2671 * Wake up socket sleeping on connect.
2672 */
2673 sk->sk_state = BT_CONNECTED;
2674 sk->sk_state_change(sk);
2675 } else {
2676 /* Incoming channel.
2677 * Wake up socket sleeping on accept.
2678 */
2679 parent->sk_data_ready(parent, 0);
2680 }
2681}
2682
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683/* Copy frame to all raw sockets on that connection */
2684static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2685{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002686 struct l2cap_chan_list *l = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 struct sk_buff *nskb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002688 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689
2690 BT_DBG("conn %p", conn);
2691
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002692 read_lock(&l->lock);
2693 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2694 if (sk->sk_type != SOCK_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695 continue;
2696
2697 /* Don't send frame to the socket it came from */
2698 if (skb->sk == sk)
2699 continue;
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03002700 nskb = skb_clone(skb, GFP_ATOMIC);
2701 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 continue;
2703
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002704 if (sock_queue_rcv_skb(sk, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 kfree_skb(nskb);
2706 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002707 read_unlock(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708}
2709
2710/* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command PDU (header + command header +
 * @dlen bytes of @data).  The frame is split across the head skb and
 * frag_list continuation skbs so no piece exceeds the controller's
 * ACL MTU.  Returns the skb chain, or NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;
	unsigned int mtu = conn->hcon->hdev->acl_mtu;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* Signalling uses a different fixed CID on LE links */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* First skb already holds both headers; fill the rest */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the whole chain including any fragments added so far */
	kfree_skb(skb);
	return NULL;
}
2774
/* Decode one configuration option from the buffer at *ptr and advance
 * *ptr past it.  The option type, payload length, and value are
 * returned through @type, @olen, @val; 1/2/4-byte payloads are
 * returned by value (little-endian for 2/4), anything else as a
 * pointer to the raw bytes.  Returns the total bytes consumed.
 *
 * NOTE(review): opt->len comes straight from the remote peer and is
 * not validated against the remaining buffer length here — callers
 * must bound the walk themselves or a malformed request can advance
 * *ptr past the end of the packet; verify before reuse.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer instead */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2807
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2809{
2810 struct l2cap_conf_opt *opt = *ptr;
2811
2812 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2813
2814 opt->type = type;
2815 opt->len = len;
2816
2817 switch (len) {
2818 case 1:
2819 *((u8 *) opt->val) = val;
2820 break;
2821
2822 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002823 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824 break;
2825
2826 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002827 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 break;
2829
2830 default:
2831 memcpy(opt->val, (void *) val, len);
2832 break;
2833 }
2834
2835 *ptr += L2CAP_CONF_OPT_SIZE + len;
2836}
2837
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002838static void l2cap_ertm_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002839{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002840 struct delayed_work *delayed =
2841 container_of(work, struct delayed_work, work);
2842 struct l2cap_pinfo *pi =
2843 container_of(delayed, struct l2cap_pinfo, ack_work);
2844 struct sock *sk = (struct sock *)pi;
2845 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002846
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002847 BT_DBG("sk %p", sk);
2848
2849 if (!sk)
2850 return;
2851
2852 lock_sock(sk);
2853
2854 if (!l2cap_pi(sk)->conn) {
2855 release_sock(sk);
2856 return;
2857 }
2858
2859 frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
2860 l2cap_pi(sk)->last_acked_seq,
2861 l2cap_pi(sk));
2862
2863 if (frames_to_ack)
2864 l2cap_ertm_send_rr_or_rnr(sk, 0);
2865
2866 release_sock(sk);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002867}
2868
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002869static void l2cap_ertm_retrans_timeout(struct work_struct *work)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002870{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002871 struct delayed_work *delayed =
2872 container_of(work, struct delayed_work, work);
2873 struct l2cap_pinfo *pi =
2874 container_of(delayed, struct l2cap_pinfo, retrans_work);
2875 struct sock *sk = (struct sock *)pi;
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03002876
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002877 BT_DBG("sk %p", sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002878
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002879 if (!sk)
2880 return;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002881
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002882 lock_sock(sk);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03002883
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002884 if (!l2cap_pi(sk)->conn) {
2885 release_sock(sk);
2886 return;
2887 }
Gustavo F. Padovan39d5a3e2011-04-04 15:40:12 -03002888
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002889 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
2890 release_sock(sk);
2891}
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03002892
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002893static void l2cap_ertm_monitor_timeout(struct work_struct *work)
2894{
2895 struct delayed_work *delayed =
2896 container_of(work, struct delayed_work, work);
2897 struct l2cap_pinfo *pi =
2898 container_of(delayed, struct l2cap_pinfo, monitor_work);
2899 struct sock *sk = (struct sock *)pi;
2900
2901 BT_DBG("sk %p", sk);
2902
2903 if (!sk)
2904 return;
2905
2906 lock_sock(sk);
2907
2908 if (!l2cap_pi(sk)->conn) {
2909 release_sock(sk);
2910 return;
2911 }
2912
2913 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
2914
2915 release_sock(sk);
2916}
2917
2918static inline void l2cap_ertm_init(struct sock *sk)
2919{
2920 l2cap_pi(sk)->next_tx_seq = 0;
2921 l2cap_pi(sk)->expected_tx_seq = 0;
2922 l2cap_pi(sk)->expected_ack_seq = 0;
2923 l2cap_pi(sk)->unacked_frames = 0;
2924 l2cap_pi(sk)->buffer_seq = 0;
2925 l2cap_pi(sk)->frames_sent = 0;
2926 l2cap_pi(sk)->last_acked_seq = 0;
2927 l2cap_pi(sk)->sdu = NULL;
2928 l2cap_pi(sk)->sdu_last_frag = NULL;
2929 l2cap_pi(sk)->sdu_len = 0;
2930 atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
2931
2932 l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2933 l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2934
2935 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
2936 l2cap_pi(sk)->rx_state);
2937
2938 l2cap_pi(sk)->amp_id = 0;
2939 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
2940 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
2941 l2cap_pi(sk)->amp_move_reqseq = 0;
2942 l2cap_pi(sk)->amp_move_event = 0;
2943
2944 INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
2945 INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
2946 l2cap_ertm_retrans_timeout);
2947 INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
2948 l2cap_ertm_monitor_timeout);
2949 INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
2950 skb_queue_head_init(SREJ_QUEUE(sk));
2951 skb_queue_head_init(TX_QUEUE(sk));
2952
2953 l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
2954 l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
2955 l2cap_pi(sk)->remote_tx_win);
2956}
2957
2958void l2cap_ertm_destruct(struct sock *sk)
2959{
2960 l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
2961 l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
2962}
2963
/* Stop all three ERTM timers (ack, retransmit, monitor) on shutdown. */
void l2cap_ertm_shutdown(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);
}
2970
2971void l2cap_ertm_recv_done(struct sock *sk)
2972{
2973 lock_sock(sk);
2974
2975 if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) {
2976 release_sock(sk);
2977 return;
2978 }
2979
2980 /* Consume any queued incoming frames and update local busy status */
2981 if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
2982 l2cap_ertm_rx_queued_iframes(sk))
2983 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
2984 else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2985 l2cap_rmem_available(sk))
2986 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
2987
2988 release_sock(sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002989}
2990
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03002991static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2992{
2993 switch (mode) {
2994 case L2CAP_MODE_STREAMING:
2995 case L2CAP_MODE_ERTM:
2996 if (l2cap_mode_supported(mode, remote_feat_mask))
2997 return mode;
2998 /* fall through */
2999 default:
3000 return L2CAP_MODE_BASIC;
3001 }
3002}
3003
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003004static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003006 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
3007 (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
3008 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3009 pi->extended_control = 1;
3010 } else {
3011 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3012 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3013
3014 pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
3015 pi->extended_control = 0;
3016 }
3017}
3018
3019static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
3020 struct hci_ext_fs *new,
3021 struct hci_ext_fs *agg)
3022{
3023 *agg = *cur;
3024 if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
3025 /* current flow spec has known rate */
3026 if ((new->max_sdu == 0xFFFF) ||
3027 (new->sdu_arr_time == 0xFFFFFFFF)) {
3028 /* new fs has unknown rate, so aggregate is unknown */
3029 agg->max_sdu = 0xFFFF;
3030 agg->sdu_arr_time = 0xFFFFFFFF;
3031 } else {
3032 /* new fs has known rate, so aggregate is known */
3033 u64 cur_rate;
3034 u64 new_rate;
3035 cur_rate = cur->max_sdu * 1000000ULL;
3036 if (cur->sdu_arr_time)
3037 cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
3038 new_rate = new->max_sdu * 1000000ULL;
3039 if (new->sdu_arr_time)
3040 new_rate = div_u64(new_rate, new->sdu_arr_time);
3041 cur_rate = cur_rate + new_rate;
3042 agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
3043 cur_rate);
3044 }
3045 }
3046}
3047
3048static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3049{
3050 struct hci_ext_fs tx_fs;
3051 struct hci_ext_fs rx_fs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003053 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003055 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3056 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3057 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3058 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3059 return 0;
3060
3061 l2cap_aggregate_fs(&chan->tx_fs,
3062 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3063 l2cap_aggregate_fs(&chan->rx_fs,
3064 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3065 hci_chan_modify(chan, &tx_fs, &rx_fs);
3066 return 1;
3067}
3068
3069static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
3070 struct hci_ext_fs *old,
3071 struct hci_ext_fs *agg)
3072{
3073 *agg = *cur;
3074 if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
3075 u64 cur_rate;
3076 u64 old_rate;
3077 cur_rate = cur->max_sdu * 1000000ULL;
3078 if (cur->sdu_arr_time)
3079 cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
3080 old_rate = old->max_sdu * 1000000ULL;
3081 if (old->sdu_arr_time)
3082 old_rate = div_u64(old_rate, old->sdu_arr_time);
3083 cur_rate = cur_rate - old_rate;
3084 agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
3085 cur_rate);
3086 }
3087}
3088
3089static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3090{
3091 struct hci_ext_fs tx_fs;
3092 struct hci_ext_fs rx_fs;
3093
3094 BT_DBG("chan %p", chan);
3095
3096 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3097 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3098 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3099 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3100 return 0;
3101
3102 l2cap_deaggregate_fs(&chan->tx_fs,
3103 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3104 l2cap_deaggregate_fs(&chan->rx_fs,
3105 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3106 hci_chan_modify(chan, &tx_fs, &rx_fs);
3107 return 1;
3108}
3109
3110static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct l2cap_pinfo *pi)
3111{
3112 struct hci_dev *hdev;
3113 struct hci_conn *hcon;
3114 struct hci_chan *chan;
3115
3116 hdev = hci_dev_get(A2MP_HCI_ID(amp_id));
3117 if (!hdev)
3118 return NULL;
3119
3120 BT_DBG("hdev %s", hdev->name);
3121
3122 hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
3123 if (!hcon)
3124 return NULL;
3125
3126 chan = hci_chan_list_lookup_id(hdev, hcon->handle);
3127 if (chan) {
3128 l2cap_aggregate(chan, pi);
3129 hci_chan_hold(chan);
3130 return chan;
3131 }
3132
3133 if (bt_sk(pi)->parent) {
3134 /* Incoming connection */
3135 chan = hci_chan_accept(hcon,
3136 (struct hci_ext_fs *) &pi->local_fs,
3137 (struct hci_ext_fs *) &pi->remote_fs);
3138 } else {
3139 /* Outgoing connection */
3140 chan = hci_chan_create(hcon,
3141 (struct hci_ext_fs *) &pi->local_fs,
3142 (struct hci_ext_fs *) &pi->remote_fs);
3143 }
3144 return chan;
3145}
3146
/* Build an L2CAP configuration request for this channel into @data.
 * On the first request only, the channel mode is (re)selected against
 * the remote feature mask.  Then the MTU option (when non-default),
 * the RFC option for the chosen mode, and — where applicable — the
 * extended-window, extended-flow-spec, and FCS options are appended.
 * Returns the number of bytes written.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Mode is only negotiated on the very first config exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE means the mode is mandated locally and
		 * must not be downgraded to what the remote supports.
		 */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an (all-zero) RFC option if the remote
		 * understands retransmission/streaming at all.
		 */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		l2cap_setup_txwin(pi);
		/* The one-byte RFC txwin field is capped at the enhanced
		 * maximum; larger windows go in the EXT_WINDOW option.
		 */
		if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
			rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
		else
			rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->tx_win);
		}

		if (pi->amp_id) {
			/* default best effort extended flow spec */
			struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		/* Offer to disable the FCS when allowed locally or the
		 * remote already asked for no FCS.
		 */
		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3264
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003265
/* Build the configuration request used to reconfigure a channel after
 * an AMP move.  Only ERTM channels can be reconfigured; the retransmit
 * and monitor timeouts are scaled from the controller's best-effort
 * flush timeout when moving onto an AMP, or reset to defaults when
 * moving back to BR/EDR.  Returns bytes written or -ECONNREFUSED for
 * non-ERTM modes.
 */
static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;
	u32 be_flush_to;

	BT_DBG("sk %p", sk);

	/* convert to milliseconds, round up */
	be_flush_to = (pi->conn->hcon->hdev->amp_be_flush_to + 999) / 1000;

	switch (pi->mode) {
	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		if (pi->amp_move_id) {
			/* Moving to AMP: timeouts derived from the
			 * controller's flush timeout.
			 */
			rfc.retrans_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
			rfc.monitor_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
		} else {
			/* Moving back to BR/EDR: default timeouts */
			rfc.retrans_timeout =
					cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
					cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		}
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		break;

	default:
		return -ECONNREFUSED;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {

		/* TODO assign fcs for br/edr based on socket config option */
		/* FCS is disabled on AMP links, enabled on BR/EDR */
		if (pi->amp_move_id)
			pi->local_conf.fcs = L2CAP_FCS_NONE;
		else
			pi->local_conf.fcs = L2CAP_FCS_CRC16;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
				pi->local_conf.fcs);

		pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3327
3328static int l2cap_parse_conf_req(struct sock *sk, void *data)
3329{
3330 struct l2cap_pinfo *pi = l2cap_pi(sk);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003331 struct l2cap_conf_rsp *rsp = data;
3332 void *ptr = rsp->data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003333 void *req = pi->conf_req;
3334 int len = pi->conf_len;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003335 int type, hint, olen;
3336 unsigned long val;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003337 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003338 struct l2cap_conf_ext_fs fs;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003339 u16 mtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003340 u16 result = L2CAP_CONF_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003342 BT_DBG("sk %p", sk);
Marcel Holtmann820ae1b2006-11-18 22:15:00 +01003343
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003344 while (len >= L2CAP_CONF_OPT_SIZE) {
3345 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346
Gustavo F. Padovan589d2742009-04-20 01:31:07 -03003347 hint = type & L2CAP_CONF_HINT;
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07003348 type &= L2CAP_CONF_MASK;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003349
3350 switch (type) {
3351 case L2CAP_CONF_MTU:
Marcel Holtmann861d6882007-10-20 13:37:06 +02003352 mtu = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003353 break;
3354
3355 case L2CAP_CONF_FLUSH_TO:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003356 pi->flush_to = val;
3357 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3358 result = L2CAP_CONF_UNACCEPT;
3359 else
3360 pi->remote_conf.flush_to = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003361 break;
3362
3363 case L2CAP_CONF_QOS:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003364 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3365 result = L2CAP_CONF_UNACCEPT;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003366 break;
3367
Marcel Holtmann6464f352007-10-20 13:39:51 +02003368 case L2CAP_CONF_RFC:
3369 if (olen == sizeof(rfc))
3370 memcpy(&rfc, (void *) val, olen);
3371 break;
3372
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003373 case L2CAP_CONF_FCS:
3374 if (val == L2CAP_FCS_NONE)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003375 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
3376 pi->remote_conf.fcs = val;
3377 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003378
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003379 case L2CAP_CONF_EXT_FS:
3380 if (olen == sizeof(fs)) {
3381 pi->conf_state |= L2CAP_CONF_EFS_RECV;
3382 if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
3383 result = L2CAP_CONF_UNACCEPT;
3384 break;
3385 }
3386 memcpy(&fs, (void *) val, olen);
3387 if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
3388 result = L2CAP_CONF_FLOW_SPEC_REJECT;
3389 break;
3390 }
3391 pi->remote_conf.flush_to =
3392 le32_to_cpu(fs.flush_to);
3393 pi->remote_fs.id = fs.id;
3394 pi->remote_fs.type = fs.type;
3395 pi->remote_fs.max_sdu =
3396 le16_to_cpu(fs.max_sdu);
3397 pi->remote_fs.sdu_arr_time =
3398 le32_to_cpu(fs.sdu_arr_time);
3399 pi->remote_fs.acc_latency =
3400 le32_to_cpu(fs.acc_latency);
3401 pi->remote_fs.flush_to =
3402 le32_to_cpu(fs.flush_to);
3403 }
3404 break;
3405
3406 case L2CAP_CONF_EXT_WINDOW:
3407 pi->extended_control = 1;
3408 pi->remote_tx_win = val;
3409 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3410 pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003411 break;
3412
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003413 default:
3414 if (hint)
3415 break;
3416
3417 result = L2CAP_CONF_UNKNOWN;
3418 *((u8 *) ptr++) = type;
3419 break;
3420 }
3421 }
3422
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003423 if (pi->num_conf_rsp || pi->num_conf_req > 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003424 goto done;
3425
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003426 switch (pi->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003427 case L2CAP_MODE_STREAMING:
3428 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003429 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
3430 pi->mode = l2cap_select_mode(rfc.mode,
3431 pi->conn->feat_mask);
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003432 break;
3433 }
3434
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003435 if (pi->mode != rfc.mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003436 return -ECONNREFUSED;
Gustavo F. Padovan742e5192010-06-08 19:09:48 -03003437
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003438 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003439 }
3440
3441done:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003442 if (pi->mode != rfc.mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003443 result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003444 rfc.mode = pi->mode;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003445
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003446 if (pi->num_conf_rsp == 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003447 return -ECONNREFUSED;
3448
3449 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3450 sizeof(rfc), (unsigned long) &rfc);
3451 }
3452
3453
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003454 if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
3455 !(pi->conf_state & L2CAP_CONF_EFS_RECV))
3456 return -ECONNREFUSED;
3457
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003458 if (result == L2CAP_CONF_SUCCESS) {
3459 /* Configure output options and let the other side know
3460 * which ones we don't like. */
3461
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003462 if (mtu < L2CAP_DEFAULT_MIN_MTU) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003463 result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003464 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003465 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003466 else {
3467 pi->omtu = mtu;
3468 pi->conf_state |= L2CAP_CONF_MTU_DONE;
3469 }
3470 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003471
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003472 switch (rfc.mode) {
3473 case L2CAP_MODE_BASIC:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003474 pi->fcs = L2CAP_FCS_NONE;
3475 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003476 break;
3477
3478 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003479 if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
3480 pi->remote_tx_win = rfc.txwin_size;
Mat Martineau86b1b262010-08-05 15:54:22 -07003481
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003482 pi->remote_max_tx = rfc.max_transmit;
Gustavo F. Padovan1c762152010-05-01 16:15:40 -03003483
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003484 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003485
Gustavo F. Padovan10467e92010-05-01 16:15:40 -03003486 rfc.retrans_timeout =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003487 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
Gustavo F. Padovan10467e92010-05-01 16:15:40 -03003488 rfc.monitor_timeout =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003489 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003490
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003491 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003492
3493 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3494 sizeof(rfc), (unsigned long) &rfc);
3495
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003496 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3497 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
3498 sizeof(fs), (unsigned long) &fs);
3499
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003500 break;
3501
3502 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003503 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan1c762152010-05-01 16:15:40 -03003504
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003505 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003506
3507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3508 sizeof(rfc), (unsigned long) &rfc);
3509
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003510 break;
3511
3512 default:
Marcel Holtmann6464f352007-10-20 13:39:51 +02003513 result = L2CAP_CONF_UNACCEPT;
3514
3515 memset(&rfc, 0, sizeof(rfc));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003516 rfc.mode = pi->mode;
3517 }
3518
3519 if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
3520 !(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
3521 pi->conf_state |= L2CAP_CONF_PEND_SENT;
3522 result = L2CAP_CONF_PENDING;
3523
3524 if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
3525 pi->amp_id) {
Peter Krystadf453bb32011-07-19 17:23:34 -07003526 struct hci_chan *chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003527 /* Trigger logical link creation only on AMP */
3528
Peter Krystadf453bb32011-07-19 17:23:34 -07003529 chan = l2cap_chan_admit(pi->amp_id, pi);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003530 if (!chan)
3531 return -ECONNREFUSED;
3532
3533 chan->l2cap_sk = sk;
3534 if (chan->state == BT_CONNECTED)
3535 l2cap_create_cfm(chan, 0);
3536 }
Marcel Holtmann6464f352007-10-20 13:39:51 +02003537 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003538
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003539 if (result == L2CAP_CONF_SUCCESS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003540 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003541 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003542 rsp->scid = cpu_to_le16(pi->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003543 rsp->result = cpu_to_le16(result);
3544 rsp->flags = cpu_to_le16(0x0000);
3545
3546 return ptr - data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547}
3548
/* Parse a configuration request received while an AMP channel move is in
 * progress (lockstep reconfiguration) and build the response in @data.
 *
 * The peer may only renegotiate a limited set of options during a move:
 * MTU may not shrink, the extended TX window may not change, and the mode
 * must stay the current non-basic mode.  Returns the number of bytes
 * written to the response buffer.
 */
static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_ext_fs fs;
	/* Defaults: keep the currently-negotiated values if the peer does
	 * not include the corresponding option. */
	u16 mtu = pi->omtu;
	u16 tx_win = pi->remote_tx_win;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			/* Flush timeout may not be renegotiated once the
			 * channel has been moved to an AMP controller. */
			if (pi->amp_move_id)
				result = L2CAP_CONF_UNACCEPT;
			else
				pi->remote_conf.flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			if (pi->amp_move_id)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			/* Mode changes (or basic mode) are not allowed
			 * during a reconfiguration. */
			if (pi->mode != rfc.mode ||
					rfc.mode == L2CAP_MODE_BASIC)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_FCS:
			pi->remote_conf.fcs = val;
			break;

		case L2CAP_CONF_EXT_FS:
			/* Only best-effort flow specs are supported. */
			if (olen == sizeof(fs)) {
				memcpy(&fs, (void *) val, olen);
				if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
					result = L2CAP_CONF_FLOW_SPEC_REJECT;
				else {
					pi->remote_conf.flush_to =
						le32_to_cpu(fs.flush_to);
				}
			}
			break;

		case L2CAP_CONF_EXT_WINDOW:
			tx_win = val;
			break;

		default:
			/* Unknown hints are ignored; unknown non-hint
			 * options are echoed back with CONF_UNKNOWN. */
			if (hint)
				break;

			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
		result, pi->mode, rfc.mode);

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* Don't allow mtu to decrease. */
		if (mtu < pi->omtu)
			result = L2CAP_CONF_UNACCEPT;

		BT_DBG("mtu %d omtu %d", mtu, pi->omtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		/* Don't allow extended transmit window to change. */
		if (tx_win != pi->remote_tx_win) {
			result = L2CAP_CONF_UNACCEPT;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->remote_tx_win);
		}

		if (rfc.mode == L2CAP_MODE_ERTM) {
			pi->remote_conf.retrans_timeout =
				le16_to_cpu(rfc.retrans_timeout);
			pi->remote_conf.monitor_timeout =
				le16_to_cpu(rfc.monitor_timeout);

			BT_DBG("remote conf monitor timeout %d",
					pi->remote_conf.monitor_timeout);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
		}

	}

	if (result != L2CAP_CONF_SUCCESS)
		goto done;

	/* NOTE(review): FCS modes are OR-ed here, so CRC16 wins if either
	 * side requested it — confirm this matches the intended policy. */
	pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs ;

	if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG) {
		/* Move is completing: adopt the peer's timing values.
		 * A move back to BR/EDR (amp_move_id == 0) falls back to
		 * the spec default monitor timeout. */
		pi->flush_to = pi->remote_conf.flush_to;
		pi->retrans_timeout = pi->remote_conf.retrans_timeout;

		if (pi->amp_move_id)
			pi->monitor_timeout = pi->remote_conf.monitor_timeout;
		else
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
		BT_DBG("mode %d monitor timeout %d",
			pi->mode, pi->monitor_timeout);

	}

done:
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3689
3690static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
3691{
3692 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003693 struct l2cap_conf_req *req = data;
3694 void *ptr = req->data;
3695 int type, olen;
3696 unsigned long val;
3697 struct l2cap_conf_rfc rfc;
3698
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003699 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003700
3701 while (len >= L2CAP_CONF_OPT_SIZE) {
3702 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3703
3704 switch (type) {
3705 case L2CAP_CONF_MTU:
3706 if (val < L2CAP_DEFAULT_MIN_MTU) {
3707 *result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003708 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003709 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003710 pi->imtu = val;
3711 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003712 break;
3713
3714 case L2CAP_CONF_FLUSH_TO:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003715 pi->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003716 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003717 2, pi->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003718 break;
3719
3720 case L2CAP_CONF_RFC:
3721 if (olen == sizeof(rfc))
3722 memcpy(&rfc, (void *)val, olen);
3723
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003724 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
3725 rfc.mode != pi->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003726 return -ECONNREFUSED;
3727
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003728 pi->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003729
3730 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3731 sizeof(rfc), (unsigned long) &rfc);
3732 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003733
3734 case L2CAP_CONF_EXT_WINDOW:
3735 pi->tx_win = val;
3736
3737 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3738 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3739
3740 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
3741 2, pi->tx_win);
3742 break;
3743
3744 default:
3745 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003746 }
3747 }
3748
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003749 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003750 return -ECONNREFUSED;
3751
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003752 pi->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003753
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003754 if (*result == L2CAP_CONF_SUCCESS) {
3755 switch (rfc.mode) {
3756 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003757 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3758 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3759 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003760 break;
3761 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003762 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003763 }
3764 }
3765
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003766 req->dcid = cpu_to_le16(pi->dcid);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003767 req->flags = cpu_to_le16(0x0000);
3768
3769 return ptr - data;
3770}
3771
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003772static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773{
3774 struct l2cap_conf_rsp *rsp = data;
3775 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003776
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003777 BT_DBG("sk %p", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003779 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003780 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003781 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782
3783 return ptr - data;
3784}
3785
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003786static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003787{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003788 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003789 int type, olen;
3790 unsigned long val;
3791 struct l2cap_conf_rfc rfc;
3792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003793 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003794
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003795 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003796 return;
3797
3798 while (len >= L2CAP_CONF_OPT_SIZE) {
3799 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3800
3801 switch (type) {
3802 case L2CAP_CONF_RFC:
3803 if (olen == sizeof(rfc))
3804 memcpy(&rfc, (void *)val, olen);
3805 goto done;
3806 }
3807 }
3808
3809done:
3810 switch (rfc.mode) {
3811 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003812 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3813 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3814 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003815 break;
3816 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003817 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003818 }
3819}
3820
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003821static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
3822{
3823 struct l2cap_pinfo *pi = l2cap_pi(sk);
3824 int type, olen;
3825 unsigned long val;
3826 struct l2cap_conf_ext_fs fs;
3827
3828 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
3829
3830 while (len >= L2CAP_CONF_OPT_SIZE) {
3831 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3832 if ((type == L2CAP_CONF_EXT_FS) &&
3833 (olen == sizeof(struct l2cap_conf_ext_fs))) {
3834 memcpy(&fs, (void *)val, olen);
3835 pi->local_fs.id = fs.id;
3836 pi->local_fs.type = fs.type;
3837 pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
3838 pi->local_fs.sdu_arr_time =
3839 le32_to_cpu(fs.sdu_arr_time);
3840 pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
3841 pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
3842 break;
3843 }
3844 }
3845
3846}
3847
3848static int l2cap_finish_amp_move(struct sock *sk)
3849{
3850 struct l2cap_pinfo *pi;
3851 int err;
3852
3853 BT_DBG("sk %p", sk);
3854
3855 pi = l2cap_pi(sk);
3856
3857 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
3858 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
3859
3860 if (pi->ampcon)
3861 pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
3862 else
3863 pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
3864
3865 err = l2cap_setup_resegment(sk);
3866
3867 return err;
3868}
3869
/* Handle the peer's response to a channel-move reconfiguration request.
 *
 * On success the peer's RFC option (if present) is validated: the mode
 * must be either the current mode or ERTM.  All ERTM timers are then
 * stopped and the move is driven forward according to which side
 * initiated the reconfiguration (accepting side answers the pending
 * poll; initiating side issues an explicit poll and waits for the
 * F-flag).  Returns 0 or a negative error.
 */
static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
					u16 result)
{
	int err = 0;
	struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);

	/* A reconf response is only valid while a reconfiguration is
	 * actually pending. */
	if (pi->reconf_state == L2CAP_RECONF_NONE)
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		while (len >= L2CAP_CONF_OPT_SIZE) {
			int type, olen;
			unsigned long val;

			len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

			if (type == L2CAP_CONF_RFC) {
				if (olen == sizeof(rfc))
					memcpy(&rfc, (void *)val, olen);
				/* Reject a mode switch to anything other
				 * than the current mode or ERTM. */
				if (rfc.mode != pi->mode &&
					rfc.mode != L2CAP_MODE_ERTM) {
					err = -ECONNREFUSED;
					goto done;
				}
				break;
			}
		}
	}

done:
	/* Timers are restarted as needed by the subsequent poll/answer
	 * sequence below. */
	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);

	if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;

		/* Respond to poll */
		err = l2cap_answer_move_poll(sk);

	} else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {

		/* If moving to BR/EDR, use default timeout defined by
		 * the spec */
		if (pi->amp_move_id == 0)
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

		if (pi->mode == L2CAP_MODE_ERTM) {
			l2cap_ertm_tx(sk, NULL, NULL,
					L2CAP_ERTM_EVENT_EXPLICIT_POLL);
			pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
		}
	}

	return err;
}
3929
3930
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003931static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3932{
3933 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
3934
3935 if (rej->reason != 0x0000)
3936 return 0;
3937
3938 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3939 cmd->ident == conn->info_ident) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003940 del_timer(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003941
3942 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003943 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003944
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003945 l2cap_conn_start(conn);
3946 }
3947
3948 return 0;
3949}
3950
/* Handle an incoming connection request and create the child channel.
 *
 * Looks up a listening socket for the requested PSM, performs security
 * and backlog checks, allocates and registers a child socket, and sends
 * a connection response with code @rsp_code.  @amp_id is non-zero when
 * the request arrived for an AMP controller, in which case the result is
 * forced to "pending" until the physical link is up.
 *
 * Returns the new child socket, or NULL if the request was refused.
 * Locking: takes parent's bh lock and the connection channel-list lock;
 * both are released before returning.
 */
static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
						struct l2cap_cmd_hdr *cmd,
						u8 *data, u8 rsp_code,
						u8 amp_id)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		sk = NULL;
		goto response;
	}

	hci_conn_hold(conn->hcon);

	/* Initialize the child from the listening parent and record the
	 * peer addressing; the peer's scid becomes our dcid. */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);
	dcid = l2cap_pi(sk)->scid;
	l2cap_pi(sk)->amp_id = amp_id;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Userspace must authorize before the
				 * connection completes. */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up. */
				if (amp_id) {
					sk->sk_state = BT_CONNECT2;
					result = L2CAP_CR_PEND;
				} else {
					sk->sk_state = BT_CONFIG;
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet; answer pending
		 * and kick off an info request below. */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* If the connect succeeded immediately, start configuration. */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
				result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return sk;
}
4093
4094static inline int l2cap_connect_req(struct l2cap_conn *conn,
4095 struct l2cap_cmd_hdr *cmd, u8 *data)
4096{
4097 l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004098 return 0;
4099}
4100
/* Handle the peer's connection response.
 *
 * Looks up our channel by the returned scid (or, when scid is zero, by
 * the command identifier we used) and advances the channel state:
 * success moves to BT_CONFIG and sends the first configuration request,
 * pending just records that fact, and any other result tears the
 * channel down.  Returns 0, or -EFAULT if no matching channel exists.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* NOTE(review): the lookup helpers appear to return with the sock
	 * bh-locked (released at the bottom) — confirm against their
	 * definitions elsewhere in this file. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Don't send a second config request if one is already
		 * outstanding. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			/* Retry teardown shortly once userspace releases
			 * the socket. */
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
4162
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004163static inline void set_default_fcs(struct l2cap_pinfo *pi)
Mat Martineau8c462b62010-08-24 15:35:42 -07004164{
4165 /* FCS is enabled only in ERTM or streaming mode, if one or both
4166 * sides request it.
4167 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004168 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
4169 pi->fcs = L2CAP_FCS_NONE;
4170 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
4171 pi->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07004172}
4173
/* Handle an incoming L2CAP Configure Request.
 *
 * Accumulates (possibly fragmented) configuration options in the channel's
 * conf_req buffer, parses them once the final fragment arrives, sends the
 * Configure Response, and completes channel setup when both directions are
 * configured.  Also services the reconfiguration exchange that follows an
 * AMP channel move, and the lockstep configuration used with AMP
 * controllers.  Returns 0 (errors are reported to the peer in-band) or
 * -ENOENT when the destination CID is unknown.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rspbuf[64];
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
	struct sock *sk;
	int len;
	u8 amp_move_reconf = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* Lookup returns with the socket bh-locked; released at "unlock". */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
		"reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
		sk->sk_state, l2cap_pi(sk)->rx_state,
		l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
		l2cap_pi(sk)->amp_move_id);

	/* Detect a reconfig request due to channel move between
	 * BR/EDR and AMP
	 */
	if (sk->sk_state == BT_CONNECTED &&
		l2cap_pi(sk)->rx_state ==
		L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;

	if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
		amp_move_reconf = 1;

	/* A config request is only legal in BT_CONFIG, except for the
	 * post-move reconfiguration above; reject with reason 0x0002
	 * ("invalid CID") otherwise.
	 */
	if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_REJECT, flags), rspbuf);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	/* Continuation flag set: more option fragments follow. */
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
		goto unlock;
	}

	/* Complete config. */
	if (!amp_move_reconf)
		len = l2cap_parse_conf_req(sk, rspbuf);
	else
		len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);

	/* Parser failure means unacceptable options: tear the channel down. */
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_pi(sk)->conf_ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
			rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
			!l2cap_pi(sk)->amp_id) {
		/* Send success response right after pending if using
		 * lockstep config on BR/EDR
		 */
		rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
	}

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	/* Post-move reconfiguration does not run the normal completion
	 * bookkeeping below.
	 */
	if (amp_move_reconf)
		goto unlock;

	l2cap_pi(sk)->num_conf_rsp++;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: bring the channel up. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
			l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We have answered the peer but not yet sent our own config
	 * request for the reverse direction; do so now.
	 */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
4301
4302static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4303{
4304 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4305 u16 scid, flags, result;
4306 struct sock *sk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004307 struct l2cap_pinfo *pi;
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03004308 int len = cmd->len - sizeof(*rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004309
4310 scid = __le16_to_cpu(rsp->scid);
4311 flags = __le16_to_cpu(rsp->flags);
4312 result = __le16_to_cpu(rsp->result);
4313
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03004314 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
4315 scid, flags, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004316
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004317 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
4318 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004319 return 0;
4320
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004321 pi = l2cap_pi(sk);
4322
4323 if (pi->reconf_state != L2CAP_RECONF_NONE) {
4324 l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
4325 goto done;
4326 }
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004327
Linus Torvalds1da177e2005-04-16 15:20:36 -07004328 switch (result) {
4329 case L2CAP_CONF_SUCCESS:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004330 if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
4331 !(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
4332 /* Lockstep procedure requires a pending response
4333 * before success.
4334 */
4335 l2cap_send_disconn_req(conn, sk, ECONNRESET);
4336 goto done;
4337 }
4338
4339 l2cap_conf_rfc_get(sk, rsp->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004340 break;
4341
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004342 case L2CAP_CONF_PENDING:
4343 if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
4344 l2cap_send_disconn_req(conn, sk, ECONNRESET);
4345 goto done;
4346 }
4347
4348 l2cap_conf_rfc_get(sk, rsp->data, len);
4349
4350 pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;
4351
Peter Krystadf453bb32011-07-19 17:23:34 -07004352 l2cap_conf_ext_fs_get(sk, rsp->data, len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004353
4354 if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
4355 struct hci_chan *chan;
4356
4357 /* Already sent a 'pending' response, so set up
4358 * the logical link now
4359 */
Peter Krystadf453bb32011-07-19 17:23:34 -07004360 chan = l2cap_chan_admit(pi->amp_id, pi);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004361 if (!chan) {
4362 l2cap_send_disconn_req(pi->conn, sk,
4363 ECONNRESET);
4364 goto done;
4365 }
4366
4367 chan->l2cap_sk = sk;
4368 if (chan->state == BT_CONNECTED)
4369 l2cap_create_cfm(chan, 0);
4370 }
4371
4372 goto done;
4373
Linus Torvalds1da177e2005-04-16 15:20:36 -07004374 case L2CAP_CONF_UNACCEPT:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004375 if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004376 char req[64];
4377
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004378 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004379 l2cap_send_disconn_req(conn, sk, ECONNRESET);
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004380 goto done;
4381 }
4382
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004383 /* throw out any old stored conf requests */
4384 result = L2CAP_CONF_SUCCESS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004385 len = l2cap_parse_conf_rsp(sk, rsp->data,
4386 len, req, &result);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004387 if (len < 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004388 l2cap_send_disconn_req(conn, sk, ECONNRESET);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004389 goto done;
4390 }
4391
4392 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4393 L2CAP_CONF_REQ, len, req);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004394 pi->num_conf_req++;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004395 if (result != L2CAP_CONF_SUCCESS)
4396 goto done;
4397 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004398 }
4399
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004400 default:
Marcel Holtmannb1235d72008-07-14 20:13:54 +02004401 sk->sk_err = ECONNRESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004402 l2cap_sock_set_timer(sk, HZ * 5);
4403 l2cap_send_disconn_req(conn, sk, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004404 goto done;
4405 }
4406
4407 if (flags & 0x01)
4408 goto done;
4409
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004410 pi->conf_state |= L2CAP_CONF_INPUT_DONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004411
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004412 if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
4413 set_default_fcs(pi);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004414
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004415 sk->sk_state = BT_CONNECTED;
4416
4417 if (pi->mode == L2CAP_MODE_ERTM ||
4418 pi->mode == L2CAP_MODE_STREAMING)
4419 l2cap_ertm_init(sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004420
Linus Torvalds1da177e2005-04-16 15:20:36 -07004421 l2cap_chan_ready(sk);
4422 }
4423
4424done:
4425 bh_unlock_sock(sk);
4426 return 0;
4427}
4428
/* Handle an incoming L2CAP Disconnection Request.
 *
 * Acknowledges the request, cancels pending transmissions and ERTM
 * timers (unless a local disconnect is already in flight), and deletes
 * the channel.  If user space currently owns the socket lock, deletion
 * is deferred via a short timer instead.  Always returns 0.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our scid; lookup bh-locks the socket. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo the CID pair back in the response before tearing down. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Only do cleanup if a disconnect request was not sent already */
	if (sk->sk_state != BT_DISCONN) {
		sk->sk_shutdown = SHUTDOWN_MASK;

		skb_queue_purge(TX_QUEUE(sk));

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
			skb_queue_purge(SREJ_QUEUE(sk));

			__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
			__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
			__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
		}
	}

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* Retry shortly; the timer path performs the deletion. */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
4480
4481static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4482{
4483 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4484 u16 dcid, scid;
4485 struct sock *sk;
4486
4487 scid = __le16_to_cpu(rsp->scid);
4488 dcid = __le16_to_cpu(rsp->dcid);
4489
4490 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4491
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004492 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
4493 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494 return 0;
4495
Andrei Emeltchenkoa49184c2010-11-03 12:32:44 +02004496 /* don't delete l2cap channel if sk is owned by user */
4497 if (sock_owned_by_user(sk)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004498 sk->sk_state = BT_DISCONN;
4499 l2cap_sock_clear_timer(sk);
4500 l2cap_sock_set_timer(sk, HZ / 5);
Andrei Emeltchenkoa49184c2010-11-03 12:32:44 +02004501 bh_unlock_sock(sk);
4502 return 0;
4503 }
4504
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004505 l2cap_chan_del(sk, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506 bh_unlock_sock(sk);
4507
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004508 l2cap_sock_kill(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509 return 0;
4510}
4511
4512static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4513{
4514 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515 u16 type;
4516
4517 type = __le16_to_cpu(req->type);
4518
4519 BT_DBG("type 0x%4.4x", type);
4520
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004521 if (type == L2CAP_IT_FEAT_MASK) {
4522 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004523 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004524 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4525 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4526 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004527 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004528 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004529 | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004530 put_unaligned_le32(feat_mask, rsp->data);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004531 l2cap_send_cmd(conn, cmd->ident,
4532 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004533 } else if (type == L2CAP_IT_FIXED_CHAN) {
4534 u8 buf[12];
4535 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4536 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4537 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4538 memcpy(buf + 4, l2cap_fixed_chan, 8);
4539 l2cap_send_cmd(conn, cmd->ident,
4540 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004541 } else {
4542 struct l2cap_info_rsp rsp;
4543 rsp.type = cpu_to_le16(type);
4544 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4545 l2cap_send_cmd(conn, cmd->ident,
4546 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
4547 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004548
4549 return 0;
4550}
4551
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the connection-level feature discovery started at connect
 * time: a feature-mask answer may chain into a fixed-channel query, and
 * once discovery is done (or fails) pending channels are started via
 * l2cap_conn_start().  Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* A valid answer arrived; stop the discovery timeout. */
	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query: give up on discovery and start
		 * the pending channels anyway.
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: ask which ones. */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* First octet of the bitmap covers the classic fixed CIDs. */
		conn->fc_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4605
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004606static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
4607 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
4608{
4609 struct l2cap_move_chan_req req;
4610 u8 ident;
4611
4612 BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
4613 (int) dest_amp_id);
4614
4615 ident = l2cap_get_ident(conn);
4616 if (pi)
4617 pi->ident = ident;
4618
4619 req.icid = cpu_to_le16(icid);
4620 req.dest_amp_id = dest_amp_id;
4621
4622 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
4623}
4624
4625static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4626 u16 icid, u16 result)
4627{
4628 struct l2cap_move_chan_rsp rsp;
4629
4630 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4631
4632 rsp.icid = cpu_to_le16(icid);
4633 rsp.result = cpu_to_le16(result);
4634
4635 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4636}
4637
4638static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4639 struct l2cap_pinfo *pi, u16 icid, u16 result)
4640{
4641 struct l2cap_move_chan_cfm cfm;
4642 u8 ident;
4643
4644 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4645
4646 ident = l2cap_get_ident(conn);
4647 if (pi)
4648 pi->ident = ident;
4649
4650 cfm.icid = cpu_to_le16(icid);
4651 cfm.result = cpu_to_le16(result);
4652
4653 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4654}
4655
4656static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4657 u16 icid)
4658{
4659 struct l2cap_move_chan_cfm_rsp rsp;
4660
4661 BT_DBG("icid %d", (int) icid);
4662
4663 rsp.icid = cpu_to_le16(icid);
4664 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4665}
4666
4667static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4668 struct l2cap_cmd_hdr *cmd, u8 *data)
4669{
4670 struct l2cap_create_chan_req *req =
4671 (struct l2cap_create_chan_req *) data;
4672 struct sock *sk;
4673 u16 psm, scid;
4674
4675 psm = le16_to_cpu(req->psm);
4676 scid = le16_to_cpu(req->scid);
4677
4678 BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
4679 (int) req->amp_id);
4680
4681 if (req->amp_id) {
4682 struct hci_dev *hdev;
4683
4684 /* Validate AMP controller id */
4685 hdev = hci_dev_get(A2MP_HCI_ID(req->amp_id));
4686 if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
4687 struct l2cap_create_chan_rsp rsp;
4688
4689 rsp.dcid = 0;
4690 rsp.scid = cpu_to_le16(scid);
4691 rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
4692 rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;
4693
4694 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4695 sizeof(rsp), &rsp);
4696
4697 if (hdev)
4698 hci_dev_put(hdev);
4699
4700 return 0;
4701 }
4702
4703 hci_dev_put(hdev);
4704 }
4705
4706 sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4707 req->amp_id);
4708
4709 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
4710
4711 if (sk && req->amp_id)
4712 amp_accept_physical(conn, req->amp_id, sk);
4713
4714 return 0;
4715}
4716
/* Handle an incoming L2CAP Create Channel Response (AMP).  The payload
 * layout matches a Connect Response, so the handling is delegated to
 * l2cap_connect_rsp().
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4724
/* Handle an incoming L2CAP Move Channel Request (responder side of an
 * AMP channel move).
 *
 * Validates that the channel is movable (dynamic CID, ERTM/streaming
 * mode, known destination controller, no move collision, policy allows
 * it), then either completes the move to BR/EDR or starts the physical
 * link setup towards the requested AMP, answering with success, pending
 * or a refusal code.  Always returns 0; the outcome goes to the peer in
 * the Move Channel Response.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid = 0;
	u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	/* Unknown channel: refuse with icid echoed back. */
	if (!sk)
		goto send_move_response;

	lock_sock(sk);
	pi = l2cap_pi(sk);

	/* Only dynamic ERTM/streaming channels may be moved. */
	if (pi->scid < L2CAP_CID_DYN_START ||
		(pi->mode != L2CAP_MODE_ERTM &&
		pi->mode != L2CAP_MODE_STREAMING)) {
		goto send_move_response;
	}

	if (pi->amp_id == req->dest_amp_id) {
		result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
		goto send_move_response;
	}

	/* Destination AMP controller must exist and be up. */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(A2MP_HCI_ID(req->dest_amp_id));
		if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
			goto send_move_response;
		}
	}

	/* Move collision: both sides initiated a move.  The side with the
	 * higher BD_ADDR (bacmp > 0) refuses so only one move proceeds.
	 */
	if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
		pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
		bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
		goto send_move_response;
	}

	/* Local policy pins this channel to BR/EDR. */
	if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
		result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
		goto send_move_response;
	}

	/* Accept the move: become responder and stop data flow. */
	pi->amp_move_cmd_ident = cmd->ident;
	pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
	l2cap_amp_move_setup(sk);
	pi->amp_move_id = req->dest_amp_id;
	icid = pi->dcid;

	if (req->dest_amp_id == 0) {
		/* Moving to BR/EDR */
		if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			result = L2CAP_MOVE_CHAN_PENDING;
		} else {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
			result = L2CAP_MOVE_CHAN_SUCCESS;
		}
	} else {
		/* Moving to an AMP: physical link must come up first. */
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
		amp_accept_physical(pi->conn, req->dest_amp_id, sk);
		result = L2CAP_MOVE_CHAN_PENDING;
	}

send_move_response:
	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);

	if (sk)
		release_sock(sk);

	return 0;
}
4813
/* Handle an incoming L2CAP Move Channel Response (initiator side of an
 * AMP channel move).
 *
 * On success/pending, advances the move state machine: depending on the
 * current state this either waits for the logical link, confirms the
 * move immediately, or admits the logical channel on the destination
 * AMP.  On any failure result the move is cancelled (or the local role
 * flips to responder on a collision) and an "unconfirmed" confirmation
 * is sent.  Always returns 0.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid, result;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	switch (result) {
	case L2CAP_MOVE_CHAN_SUCCESS:
	case L2CAP_MOVE_CHAN_PENDING:
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Unknown channel: confirm as failed so the peer
			 * can clean up.
			 */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		/* A pending result restarts the (extended) response timer. */
		l2cap_sock_clear_timer(sk);
		if (result == L2CAP_MOVE_CHAN_PENDING)
			l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);

		if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent when logical link
			 * is complete.
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
		} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
				pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			} else {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			}
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
			/* Unbounded flow spec defaults used until the peer
			 * negotiates real values.
			 */
			struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			struct hci_chan *chan;
			/* Moving to AMP */
			if (result == L2CAP_MOVE_CHAN_SUCCESS) {
				/* Remote is ready, send confirm immediately
				 * after logical link is ready
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			} else {
				/* Both logical link and move success
				 * are required to confirm
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
			}
			pi->remote_fs = default_fs;
			pi->local_fs = default_fs;
			chan = l2cap_chan_admit(pi->amp_move_id, pi);
			if (!chan) {
				/* Logical link not available */
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
				break;
			}
			if (chan->state == BT_CONNECTED) {
				/* Logical link is already ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				if (result == L2CAP_MOVE_CHAN_SUCCESS) {
					/* Can confirm now */
					l2cap_send_move_chan_cfm(conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				} else {
					/* Now only need move success
					 * required to confirm
					 */
					pi->amp_move_state =
						L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
				}
			} else
				chan->l2cap_sk = sk;
		} else {
			/* Any other amp move state means the move failed. */
			l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}
		break;
	default:
		/* Failed (including collision case) */
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Could not locate channel, icid is best guess */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);

		if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
				pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
			else {
				/* Cleanup - cancel move */
				pi->amp_move_id = pi->amp_id;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
				l2cap_amp_move_revert(sk);
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
			}
		} else {
			/* State is STABLE so the confirm response is
			 * ignored.
			 */
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
		}

		l2cap_send_move_chan_cfm(conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		break;
	}

	/* sk is non-NULL only on paths that took lock_sock() above. */
	if (sk)
		release_sock(sk);

	return 0;
}
4969
/* Handle an incoming L2CAP Move Channel Confirmation (responder side).
 *
 * If the channel was waiting for this confirmation, commits the move on
 * a "confirmed" result (adopting the new controller id and releasing
 * the AMP channel when moving back to BR/EDR) or reverts it otherwise.
 * A Confirmation Response is always sent back, even for an unknown
 * channel.  Always returns 0.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
	struct sock *sk;
	u16 icid, result;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk) {
		BT_DBG("Bad channel (%d)", (int) icid);
		goto send_move_confirm_response;
	}

	lock_sock(sk);

	if (l2cap_pi(sk)->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
			l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;
			if ((!l2cap_pi(sk)->amp_id) &&
					(l2cap_pi(sk)->ampchan)) {
				/* Have moved off of AMP, free the channel */
				/* NOTE(review): ampchan->refcnt is read after
				 * hci_chan_put() has dropped our reference —
				 * looks like a potential use-after-free if
				 * that put released the last reference; the
				 * cfm_rsp handler below has the same pattern.
				 * TODO confirm hci_chan_put() semantics.
				 */
				hci_chan_put(l2cap_pi(sk)->ampchan);
				if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
				l2cap_pi(sk)->ampchan = NULL;
				l2cap_pi(sk)->ampcon = NULL;
			}
			l2cap_amp_move_success(sk);
		} else {
			/* Peer did not confirm: undo the pending move. */
			l2cap_pi(sk)->amp_move_id = l2cap_pi(sk)->amp_id;
			l2cap_amp_move_revert(sk);
		}
		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	} else if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
		BT_DBG("Bad AMP_MOVE_STATE (%d)", l2cap_pi(sk)->amp_move_state);
	}

send_move_confirm_response:
	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	if (sk)
		release_sock(sk);

	return 0;
}
5026
/* Handle an incoming L2CAP Move Channel Confirmation Response
 * (initiator side) — the final message of the move procedure.
 *
 * If the channel was waiting for this response, commits the move:
 * adopts the new controller id, releases the AMP channel when the move
 * went back to BR/EDR, and resumes data flow.  Always returns 0.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp =
		(struct l2cap_move_chan_cfm_rsp *) data;
	struct sock *sk;
	u16 icid;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", (int) icid);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		return 0;

	lock_sock(sk);

	l2cap_sock_clear_timer(sk);

	if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;

		if (!l2cap_pi(sk)->amp_id) {
			/* Have moved off of AMP, free the channel */
			l2cap_pi(sk)->ampcon = NULL;
			if (l2cap_pi(sk)->ampchan) {
				/* NOTE(review): ampchan->refcnt is read after
				 * hci_chan_put() has dropped our reference —
				 * potential use-after-free if that was the
				 * last reference.  TODO confirm
				 * hci_chan_put() semantics.
				 */
				hci_chan_put(l2cap_pi(sk)->ampchan);
				if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
			}
			l2cap_pi(sk)->ampchan = NULL;
		}

		l2cap_amp_move_success(sk);

		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	}

	release_sock(sk);

	return 0;
}
5076
5077static void l2cap_amp_signal_worker(struct work_struct *work)
5078{
5079 int err = 0;
5080 struct l2cap_amp_signal_work *ampwork =
5081 container_of(work, struct l2cap_amp_signal_work, work);
5082
5083 switch (ampwork->cmd.code) {
5084 case L2CAP_MOVE_CHAN_REQ:
5085 err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
5086 ampwork->data);
5087 break;
5088
5089 case L2CAP_MOVE_CHAN_RSP:
5090 err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
5091 ampwork->data);
5092 break;
5093
5094 case L2CAP_MOVE_CHAN_CFM:
5095 err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
5096 ampwork->data);
5097 break;
5098
5099 case L2CAP_MOVE_CHAN_CFM_RSP:
5100 err = l2cap_move_channel_confirm_rsp(ampwork->conn,
5101 &ampwork->cmd, ampwork->data);
5102 break;
5103
5104 default:
5105 BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
5106 err = -EINVAL;
5107 break;
5108 }
5109
5110 if (err) {
5111 struct l2cap_cmd_rej rej;
5112 BT_DBG("error %d", err);
5113
5114 /* In this context, commands are only rejected with
5115 * "command not understood", code 0.
5116 */
5117 rej.reason = cpu_to_le16(0);
5118 l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
5119 L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5120 }
5121
5122 kfree_skb(ampwork->skb);
5123 kfree(ampwork);
5124}
5125
/* Completion callback for an AMP physical link create/move attempt.
 *
 * Branches on the channel's lifecycle stage:
 *  - not yet connected: finish an incoming Create Channel (send response
 *    and, on success, the first Configure Request), or for an outgoing
 *    channel either send Create Channel Request or fall back to a plain
 *    BR/EDR connect;
 *  - connected, move succeeded as initiator: start the Move Channel
 *    request/timer sequence;
 *  - connected, move succeeded as responder: admit the logical channel
 *    and either answer immediately or wait for the logical link;
 *  - otherwise (failure): refuse the move when responder, reset move
 *    state, clear local-busy if receive room is available, and restart
 *    ERTM transmission.
 *
 * Runs with the socket locked for the whole body; bails out early on
 * channels already disconnecting or closed.
 */
void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
				struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
		(int) local_id, (int) remote_id, sk);

	lock_sock(sk);

	if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
		release_sock(sk);
		return;
	}

	pi = l2cap_pi(sk);

	if (sk->sk_state != BT_CONNECTED) {
		if (bt_sk(sk)->parent) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			/* Note the swap: our dcid is the peer's scid */
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			/* Incoming channel on AMP */
			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Send successful response */
				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			} else {
				/* Send negative response */
				rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			}

			l2cap_send_cmd(pi->conn, pi->ident,
					L2CAP_CREATE_CHAN_RSP,
					sizeof(rsp), &rsp);

			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Proceed straight to configuration */
				sk->sk_state = BT_CONFIG;
				pi->conf_state |= L2CAP_CONF_REQ_SENT;
				l2cap_send_cmd(pi->conn,
					l2cap_get_ident(pi->conn),
					L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
				l2cap_pi(sk)->num_conf_req++;
			}
		} else {
			/* Outgoing channel on AMP */
			if (result != L2CAP_CREATE_CHAN_SUCCESS) {
				/* Revert to BR/EDR connect */
				l2cap_send_conn_req(sk);
			} else {
				pi->amp_id = local_id;
				l2cap_send_create_chan_req(sk, remote_id);
			}
		}
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		l2cap_amp_move_setup(sk);
		pi->amp_move_id = local_id;
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;

		l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		struct hci_chan *chan;
		/* Wide-open default flow spec until one is negotiated */
		struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
				0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
		pi->remote_fs = default_fs;
		pi->local_fs = default_fs;
		chan = l2cap_chan_admit(local_id, pi);
		if (chan) {
			if (chan->state == BT_CONNECTED) {
				/* Logical link is ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			} else {
				/* Wait for logical link to be ready */
				chan->l2cap_sk = sk;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			}
		} else {
			/* Logical link not available */
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}
	} else {
		/* Physical link failed, or we are in no role to act on it */
		BT_DBG("result %d, role %d, local_busy %d", result,
			(int) pi->amp_move_role,
			(int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));

		if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			if (result == -EINVAL)
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
			else
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}

		pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
		pi->amp_move_state = L2CAP_AMP_STATE_STABLE;

		/* Moving set local-busy; clear it if we now have rx room */
		if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
			l2cap_ertm_tx(sk, 0, 0,
					L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

		/* Restart data transmission */
		l2cap_ertm_send(sk);
	}

	release_sock(sk);
}
5253
/* Completion callback for an AMP logical link (HCI channel) setup.
 *
 * On success (status == 0) the logical link is attached to the channel,
 * then:
 *  - channel still configuring: finish configuration (send the pending
 *    Configure Response and, once both directions are done, mark the
 *    channel connected and initialize ERTM);
 *  - channel moving: advance the move state machine and send the Move
 *    Channel confirm/response appropriate to our role;
 *  - any other state: the link is unexpected, drop our reference.
 * On failure, disconnect a channel that never finished configuring, or
 * abort/refuse the in-progress move depending on our role.
 *
 * Always returns 0.  Runs under lock_sock(); the configuration branch
 * additionally takes bh_lock_sock() to serialize with the softirq-side
 * l2cap_config_rsp() path (see comment below).
 */
int l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
{
	struct l2cap_pinfo *pi;
	struct sock *sk;

	BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);

	sk = chan->l2cap_sk;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	/* Ignore stale completions for channels that went away, unless
	 * the channel is already bound to an AMP controller.
	 */
	if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
		release_sock(sk);
		return 0;
	}

	pi = l2cap_pi(sk);

	/* NOTE(review): chan was already dereferenced above (chan->conn,
	 * chan->l2cap_sk), so the NULL test here can never be the first
	 * line of defense - presumably callers never pass NULL; confirm.
	 */
	if ((!status) && (chan != NULL)) {
		pi->ampchan = chan;
		pi->ampcon = chan->conn;
		pi->ampcon->l2cap_data = pi->conn;

		if (sk->sk_state != BT_CONNECTED) {
			struct l2cap_conf_rsp rsp;

			/* Must use spinlock to prevent concurrent
			 * execution of l2cap_config_rsp()
			 */
			bh_lock_sock(sk);
			l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(sk, &rsp,
						L2CAP_CONF_SUCCESS, 0), &rsp);
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;

			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
				set_default_fcs(l2cap_pi(sk));

				sk->sk_state = BT_CONNECTED;

				if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
					l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
					l2cap_ertm_init(sk);

				l2cap_chan_ready(sk);
			}
			bh_unlock_sock(sk);
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent after a success
			 * response is received
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
			/* Local-busy must clear before the move can finish */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_SUCCESS);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			}
		} else {
			/* Move was not in expected state, free the
			 * logical link
			 */
			hci_chan_put(pi->ampchan);
			pi->ampcon = NULL;
			pi->ampchan = NULL;
		}
	} else {
		/* Logical link setup failed. */

		if (sk->sk_state != BT_CONNECTED)
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			l2cap_amp_move_revert(sk);
			l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONFIG);
		} else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if ((pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
				(pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
				/* Remote has only sent pending or
				 * success responses, clean up
				 */
				l2cap_amp_move_revert(sk);
				l2cap_pi(sk)->amp_move_role =
					L2CAP_AMP_MOVE_NONE;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			}

			/* Other amp move states imply that the move
			 * has already aborted
			 */
			l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}

		pi->ampcon = NULL;
		pi->ampchan = NULL;
	}

	release_sock(sk);
	return 0;
}
5379
5380static void l2cap_logical_link_worker(struct work_struct *work)
5381{
5382 struct l2cap_logical_link_work *log_link_work =
5383 container_of(work, struct l2cap_logical_link_work, work);
5384
5385 l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
5386 kfree(log_link_work);
5387}
5388
5389static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
5390{
5391 struct l2cap_logical_link_work *amp_work;
5392
5393 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5394 if (!amp_work)
5395 return -ENOMEM;
5396
5397 INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
5398 amp_work->chan = chan;
5399 amp_work->status = status;
5400 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5401 kfree(amp_work);
5402 return -ENOMEM;
5403 }
5404
5405 return 0;
5406}
5407
/* HCI callback: an AMP logical link modification (flow spec change)
 * completed.  Currently only logs the result; no state is updated.
 * Always returns 0.
 */
int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
{
	struct l2cap_conn *conn = chan->conn->l2cap_data;

	BT_DBG("chan %p conn %p status %d", chan, conn, status);

	/* TODO: if failed status restore previous fs */
	return 0;
}
5417
5418int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
5419{
5420 struct l2cap_chan_list *l;
5421 struct l2cap_conn *conn = chan->conn->l2cap_data;
5422 struct sock *sk;
5423
5424 BT_DBG("chan %p conn %p", chan, conn);
5425
5426 if (!conn)
5427 return 0;
5428
5429 l = &conn->chan_list;
5430
5431 read_lock(&l->lock);
5432
5433 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
5434 bh_lock_sock(sk);
5435 /* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
5436 if (l2cap_pi(sk)->ampchan == chan) {
5437 l2cap_pi(sk)->ampchan = NULL;
5438 l2cap_amp_move_init(sk);
5439 }
5440 bh_unlock_sock(sk);
5441 }
5442
5443 read_unlock(&l->lock);
5444
5445 return 0;
5446
5447
5448}
5449
5450static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
5451 u8 *data, struct sk_buff *skb)
5452{
5453 struct l2cap_amp_signal_work *amp_work;
5454
5455 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5456 if (!amp_work)
5457 return -ENOMEM;
5458
5459 INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
5460 amp_work->conn = conn;
5461 amp_work->cmd = *cmd;
5462 amp_work->data = data;
5463 amp_work->skb = skb_clone(skb, GFP_ATOMIC);
5464 if (!amp_work->skb) {
5465 kfree(amp_work);
5466 return -ENOMEM;
5467 }
5468
5469 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5470 kfree_skb(amp_work->skb);
5471 kfree(amp_work);
5472 return -ENOMEM;
5473 }
5474
5475 return 0;
5476}
5477
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005478static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
Claudio Takahaside731152011-02-11 19:28:55 -02005479 u16 to_multiplier)
5480{
5481 u16 max_latency;
5482
5483 if (min > max || min < 6 || max > 3200)
5484 return -EINVAL;
5485
5486 if (to_multiplier < 10 || to_multiplier > 3200)
5487 return -EINVAL;
5488
5489 if (max >= to_multiplier * 8)
5490 return -EINVAL;
5491
5492 max_latency = (to_multiplier * 8 / max) - 1;
5493 if (latency > 499 || latency > max_latency)
5494 return -EINVAL;
5495
5496 return 0;
5497}
5498
5499static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5500 struct l2cap_cmd_hdr *cmd, u8 *data)
5501{
5502 struct hci_conn *hcon = conn->hcon;
5503 struct l2cap_conn_param_update_req *req;
5504 struct l2cap_conn_param_update_rsp rsp;
5505 u16 min, max, latency, to_multiplier, cmd_len;
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005506 int err;
Claudio Takahaside731152011-02-11 19:28:55 -02005507
5508 if (!(hcon->link_mode & HCI_LM_MASTER))
5509 return -EINVAL;
5510
5511 cmd_len = __le16_to_cpu(cmd->len);
5512 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5513 return -EPROTO;
5514
5515 req = (struct l2cap_conn_param_update_req *) data;
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005516 min = __le16_to_cpu(req->min);
5517 max = __le16_to_cpu(req->max);
Claudio Takahaside731152011-02-11 19:28:55 -02005518 latency = __le16_to_cpu(req->latency);
5519 to_multiplier = __le16_to_cpu(req->to_multiplier);
5520
5521 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5522 min, max, latency, to_multiplier);
5523
5524 memset(&rsp, 0, sizeof(rsp));
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005525
5526 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5527 if (err)
Claudio Takahaside731152011-02-11 19:28:55 -02005528 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5529 else
5530 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5531
5532 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5533 sizeof(rsp), &rsp);
5534
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005535 if (!err)
5536 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5537
Claudio Takahaside731152011-02-11 19:28:55 -02005538 return 0;
5539}
5540
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005541static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005542 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
5543 struct sk_buff *skb)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005544{
5545 int err = 0;
5546
5547 switch (cmd->code) {
5548 case L2CAP_COMMAND_REJ:
5549 l2cap_command_rej(conn, cmd, data);
5550 break;
5551
5552 case L2CAP_CONN_REQ:
5553 err = l2cap_connect_req(conn, cmd, data);
5554 break;
5555
5556 case L2CAP_CONN_RSP:
5557 err = l2cap_connect_rsp(conn, cmd, data);
5558 break;
5559
5560 case L2CAP_CONF_REQ:
5561 err = l2cap_config_req(conn, cmd, cmd_len, data);
5562 break;
5563
5564 case L2CAP_CONF_RSP:
5565 err = l2cap_config_rsp(conn, cmd, data);
5566 break;
5567
5568 case L2CAP_DISCONN_REQ:
5569 err = l2cap_disconnect_req(conn, cmd, data);
5570 break;
5571
5572 case L2CAP_DISCONN_RSP:
5573 err = l2cap_disconnect_rsp(conn, cmd, data);
5574 break;
5575
5576 case L2CAP_ECHO_REQ:
5577 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5578 break;
5579
5580 case L2CAP_ECHO_RSP:
5581 break;
5582
5583 case L2CAP_INFO_REQ:
5584 err = l2cap_information_req(conn, cmd, data);
5585 break;
5586
5587 case L2CAP_INFO_RSP:
5588 err = l2cap_information_rsp(conn, cmd, data);
5589 break;
5590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005591 case L2CAP_CREATE_CHAN_REQ:
5592 err = l2cap_create_channel_req(conn, cmd, data);
5593 break;
5594
5595 case L2CAP_CREATE_CHAN_RSP:
5596 err = l2cap_create_channel_rsp(conn, cmd, data);
5597 break;
5598
5599 case L2CAP_MOVE_CHAN_REQ:
5600 case L2CAP_MOVE_CHAN_RSP:
5601 case L2CAP_MOVE_CHAN_CFM:
5602 case L2CAP_MOVE_CHAN_CFM_RSP:
5603 err = l2cap_sig_amp(conn, cmd, data, skb);
5604 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005605 default:
5606 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5607 err = -EINVAL;
5608 break;
5609 }
5610
5611 return err;
5612}
5613
5614static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5615 struct l2cap_cmd_hdr *cmd, u8 *data)
5616{
5617 switch (cmd->code) {
5618 case L2CAP_COMMAND_REJ:
5619 return 0;
5620
5621 case L2CAP_CONN_PARAM_UPDATE_REQ:
Claudio Takahaside731152011-02-11 19:28:55 -02005622 return l2cap_conn_param_update_req(conn, cmd, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005623
5624 case L2CAP_CONN_PARAM_UPDATE_RSP:
5625 return 0;
5626
5627 default:
5628 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5629 return -EINVAL;
5630 }
5631}
5632
/* Parse and dispatch every signaling command carried in one C-frame.
 *
 * A single signaling PDU may contain several back-to-back commands;
 * each is copied into a local header, validated, and routed to the
 * LE or BR/EDR dispatcher based on link type.  Handler errors are
 * answered with a Command Reject.  Consumes the skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Truncated payload or reserved ident 0: stop parsing */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
							data, skb);

		if (err) {
			struct l2cap_cmd_rej rej;

			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		/* Advance past this command's payload */
		data += cmd_len;
		len -= cmd_len;
	}

	kfree_skb(skb);
}
5680
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005681static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005682{
5683 u16 our_fcs, rcv_fcs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005684 int hdr_size;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005685
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005686 if (pi->extended_control)
5687 hdr_size = L2CAP_EXTENDED_HDR_SIZE;
5688 else
5689 hdr_size = L2CAP_ENHANCED_HDR_SIZE;
5690
5691 if (pi->fcs == L2CAP_FCS_CRC16) {
5692 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005693 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5694 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5695
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005696 if (our_fcs != rcv_fcs) {
5697 BT_DBG("Bad FCS");
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005698 return -EBADMSG;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005699 }
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005700 }
5701 return 0;
5702}
5703
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005704static void l2cap_ertm_pass_to_tx(struct sock *sk,
5705 struct bt_l2cap_control *control)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005706{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005707 BT_DBG("sk %p, control %p", sk, control);
5708 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005709}
5710
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005711static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
5712 struct bt_l2cap_control *control)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005713{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005714 BT_DBG("sk %p, control %p", sk, control);
5715 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
5716}
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005717
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005718static void l2cap_ertm_resend(struct sock *sk)
5719{
5720 struct bt_l2cap_control control;
5721 struct l2cap_pinfo *pi;
5722 struct sk_buff *skb;
5723 struct sk_buff *tx_skb;
5724 u16 seq;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005725
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005726 BT_DBG("sk %p", sk);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005727
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005728 pi = l2cap_pi(sk);
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005729
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005730 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
5731 return;
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03005732
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005733 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
5734 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
5735 return;
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005736
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005737 while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
5738 seq = l2cap_seq_list_pop(&pi->retrans_list);
5739
5740 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
5741 if (!skb) {
5742 BT_DBG("Error: Can't retransmit seq %d, frame missing",
5743 (int) seq);
5744 continue;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005745 }
5746
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005747 bt_cb(skb)->retries += 1;
5748 control = bt_cb(skb)->control;
5749
5750 if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
5751 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
5752 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
5753 l2cap_seq_list_clear(&pi->retrans_list);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005754 break;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005755 }
5756
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005757 control.reqseq = pi->buffer_seq;
5758 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
5759 control.final = 1;
5760 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
5761 } else {
5762 control.final = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005763 }
5764
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005765 if (skb_cloned(skb)) {
5766 /* Cloned sk_buffs are read-only, so we need a
5767 * writeable copy
5768 */
5769 tx_skb = skb_copy(skb, GFP_ATOMIC);
5770 } else {
5771 tx_skb = skb_clone(skb, GFP_ATOMIC);
5772 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005773
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005774 /* Update skb contents */
5775 if (pi->extended_control) {
5776 put_unaligned_le32(__pack_extended_control(&control),
5777 tx_skb->data + L2CAP_HDR_SIZE);
5778 } else {
5779 put_unaligned_le16(__pack_enhanced_control(&control),
5780 tx_skb->data + L2CAP_HDR_SIZE);
5781 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005782
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005783 if (pi->fcs == L2CAP_FCS_CRC16)
5784 apply_fcs(tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005785
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005786 tx_skb->sk = sk;
5787 tx_skb->destructor = l2cap_skb_destructor;
5788 atomic_inc(&pi->ertm_queued);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005789
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005790 l2cap_do_send(sk, tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005791
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005792 BT_DBG("Resent txseq %d", (int)control.txseq);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005793
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005794 pi->last_acked_seq = pi->buffer_seq;
Mat Martineauc0675b82011-07-07 09:39:02 -07005795 }
5796}
5797
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005798static inline void l2cap_ertm_retransmit(struct sock *sk,
5799 struct bt_l2cap_control *control)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03005800{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005801 BT_DBG("sk %p, control %p", sk, control);
5802
5803 l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
5804 l2cap_ertm_resend(sk);
5805}
5806
/* Retransmit all unacked I-frames, starting at control->reqseq (REJ
 * handling).
 *
 * The first walk locates the frame carrying reqseq; the second walk
 * queues every txseq from there up to (not including) sk_send_head -
 * i.e. everything sent but not yet acknowledged.  Skipped entirely
 * while the remote is busy; a pending P-bit is recorded so the first
 * resent frame carries the F-bit.
 */
static void l2cap_ertm_retransmit_all(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	if (control->poll)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	/* Rebuild the retransmission list from scratch */
	l2cap_seq_list_clear(&pi->retrans_list);

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return;

	if (pi->unacked_frames) {
		/* Find the frame with the requested sequence number */
		skb_queue_walk(TX_QUEUE(sk), skb) {
			if ((bt_cb(skb)->control.txseq == control->reqseq) ||
				skb == sk->sk_send_head)
				break;
		}

		/* Queue every already-sent frame from there onward */
		skb_queue_walk_from(TX_QUEUE(sk), skb) {
			if (skb == sk->sk_send_head)
				break;

			l2cap_seq_list_append(&pi->retrans_list,
						bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(sk);
	}
}
5843
/* Append new_frag to skb's frag_list and update skb's length totals.
 *
 * @last_frag tracks the current list tail so appends stay O(1);
 * it is advanced to new_frag on return.  NOTE(review): when the frag
 * list is empty, *last_frag presumably points at skb itself (caller
 * sets sdu_last_frag = skb for the start fragment) - confirm, since
 * (*last_frag)->next is written unconditionally.
 */
static inline void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	   skb->data_len reflects only data in fragments
	 */
	BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);

	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	/* Account for the appended fragment in the head skb's totals */
	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5864
/* Process an in-sequence I-frame: SDU reassembly and delivery.
 *
 * Handles the four SAR cases.  Unsegmented frames are queued to the
 * socket directly.  Start frames begin a reassembly (pi->sdu holds the
 * partial SDU); continue/end frames are appended via append_skb_frag().
 * Setting skb = NULL after handing it off marks ownership as
 * transferred, so the error path below does not double-free.  On any
 * error the partial SDU and the frame are discarded.  May assert
 * local-busy when the receive buffer fills.
 *
 * Returns 0 on success or a negative errno (-EINVAL bad control,
 * -EMSGSIZE SDU exceeds MTU, or a sock_queue_rcv_skb() failure).
 */
static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
			struct bt_l2cap_control *control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = -EINVAL;

	BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
		skb, skb->len, skb->truesize);

	if (!control)
		return err;

	pi = l2cap_pi(sk);

	BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
		control->frame_type, control->sar, control->txseq,
		control->reqseq, control->final);

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A stray reassembly in progress is abandoned */
		if (pi->sdu) {
			BT_DBG("Unexpected unsegmented PDU during reassembly");
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}

		BT_DBG("Unsegmented");
		err = sock_queue_rcv_skb(sk, skb);
		break;

	case L2CAP_SAR_START:
		if (pi->sdu) {
			BT_DBG("Unexpected start PDU during reassembly");
			kfree_skb(pi->sdu);
		}

		/* Start frames carry the total SDU length first */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be shorter than the whole SDU */
		if (skb->len >= pi->sdu_len)
			break;

		pi->sdu = skb;
		pi->sdu_last_frag = skb;

		BT_DBG("Start");

		/* Ownership transferred to pi->sdu */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* A continue fragment must not complete the SDU */
		if (pi->sdu->len >= pi->sdu_len)
			break;

		BT_DBG("Continue, reassembled %d", pi->sdu->len);

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* The end fragment must complete the SDU exactly */
		if (pi->sdu->len != pi->sdu_len)
			break;

		BT_DBG("End, reassembled %d", pi->sdu->len);
		/* If the sender used tiny PDUs, the rcv queuing could fail.
		 * Applications that have issues here should use a larger
		 * sk_rcvbuf.
		 */
		err = sock_queue_rcv_skb(sk, pi->sdu);

		if (!err) {
			/* Reassembly complete */
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}
		break;

	default:
		BT_DBG("Bad SAR value");
		break;
	}

	if (err) {
		BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
			err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
		/* Drop the partial SDU and the frame (if still owned here) */
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;
		if (skb)
			kfree_skb(skb);
	}

	/* Update local busy state */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);

	return err;
}
5989
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005990static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005991{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03005992 int err = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005993 /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
5994 * until a gap is encountered.
5995 */
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03005996
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005997 struct l2cap_pinfo *pi;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03005998
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005999 BT_DBG("sk %p", sk);
6000 pi = l2cap_pi(sk);
6001
6002 while (l2cap_rmem_available(sk)) {
6003 struct sk_buff *skb;
6004 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6005 (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
6006
6007 skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
6008
6009 if (!skb)
6010 break;
6011
6012 skb_unlink(skb, SREJ_QUEUE(sk));
6013 pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
6014 err = l2cap_ertm_rx_expected_iframe(sk,
6015 &bt_cb(skb)->control, skb);
6016 if (err)
6017 break;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006018 }
6019
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006020 if (skb_queue_empty(SREJ_QUEUE(sk))) {
6021 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6022 l2cap_ertm_send_ack(sk);
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03006023 }
6024
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006025 return err;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006026}
6027
/* Handle a received SREJ (Selective Reject) S-frame.
 *
 * The peer requests retransmission of the single I-frame numbered
 * control->reqseq.  Sanity-checks the request, retransmits the frame,
 * and tracks the SREJ_ACT/srej_save_reqseq state used to suppress a
 * duplicate retransmission when the matching F-bit response arrives.
 * Invalid requests or exceeding the retry limit tear the channel down.
 */
static void l2cap_ertm_handle_srej(struct sock *sk,
			struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	/* reqseq == next_tx_seq would reject a frame that was never sent */
	if (control->reqseq == pi->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting",
			(int) control->reqseq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);

	/* Frame already acked and freed - nothing left to retransmit */
	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
			(int) control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions */
	if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
		BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (control->poll) {
		/* P-bit set: respond with F-bit and retransmit now */
		l2cap_ertm_pass_to_tx(sk, control);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_ertm_retransmit(sk, control);
		l2cap_ertm_send(sk);

		/* Remember the reqseq so the later F-bit SREJ for the
		 * same frame is not retransmitted twice.
		 */
		if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			pi->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_ertm_pass_to_tx_fbit(sk, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit SREJ matches
			 * one already answered while polling.
			 */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				(pi->srej_save_reqseq == control->reqseq)) {
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			} else {
				l2cap_ertm_retransmit(sk, control);
			}
		} else {
			l2cap_ertm_retransmit(sk, control);
			if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
				pi->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6091
/* Handle a received REJ (Reject) S-frame.
 *
 * The peer requests retransmission of all unacked I-frames starting at
 * control->reqseq.  Validates the request, then retransmits everything
 * outstanding.  REJ_ACT suppresses a duplicate "retransmit all" when
 * the matching F-bit response to an earlier poll arrives.
 */
static void l2cap_ertm_handle_rej(struct sock *sk,
			struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	/* reqseq == next_tx_seq would reject a frame that was never sent */
	if (control->reqseq == pi->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting",
			(int) control->reqseq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	/* Unlike SREJ handling, a missing skb is not an error here: the
	 * retry-limit check simply does not apply to an absent frame.
	 */
	skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);

	if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	l2cap_ertm_pass_to_tx(sk, control);

	if (control->final) {
		/* If a poll already triggered the retransmission, just
		 * clear the flag instead of retransmitting again.
		 */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_ertm_retransmit_all(sk, control);
	} else {
		l2cap_ertm_retransmit_all(sk, control);
		l2cap_ertm_send(sk);
		if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
6133
/* Classify a received I-frame's txseq relative to the receive window.
 *
 * Returns one of the L2CAP_ERTM_TXSEQ_* classifications (expected,
 * duplicate, unexpected gap, SREJ-related variants, or invalid) that
 * drive the RECV and SREJ_SENT receive state machines.  The order of
 * the checks matters: SREJ-specific cases are evaluated first when an
 * SREJ is outstanding, then the common window checks.
 */
static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);
	pi = l2cap_pi(sk);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
		(int)pi->expected_tx_seq);

	if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
		/* txseq falling outside the tx window while SREJs are
		 * outstanding is only safely ignorable for small windows.
		 */
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - txseq outside "
					"tx window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - bad txseq within tx "
					"window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the retransmission we are
		 * waiting for next.
		 */
		if (pi->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - txseq not requested "
				"with SREJ");
			return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (pi->expected_tx_seq == txseq) {
		/* Even the expected seq is invalid if it falls outside
		 * the negotiated window.
		 */
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_ERTM_TXSEQ_EXPECTED;
		}
	}

	/* txseq before expected_tx_seq (mod window) -> already received */
	if (__delta_seq(txseq, pi->last_acked_seq, pi) <
		__delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_ERTM_TXSEQ_DUPLICATE;
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_ERTM_TXSEQ_UNEXPECTED;
	}
}
6223
/* ERTM receive state machine: handler for the normal RECV state.
 *
 * Dispatches on the receive event (I-frame or RR/RNR/REJ/SREJ S-frame).
 * Expected I-frames are reassembled and acked; a sequence gap triggers
 * SREJ recovery and a transition to the SREJ_SENT state.  The skb is
 * consumed when queued or reassembled (skb_in_use) and freed otherwise.
 *
 * Returns 0 or a negative error from reassembly.
 */
static int l2cap_ertm_rx_state_recv(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;	/* set once skb ownership is transferred */

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* While locally busy, expected frames are dropped;
			 * the peer will retransmit once busy is cleared.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				/* F-bit answers an earlier poll; retransmit
				 * everything unless a REJ already did.
				 */
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else {
					control->final = 0;
					l2cap_ertm_retransmit_all(sk, control);
					l2cap_ertm_send(sk);
				}
			}

			if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
				l2cap_ertm_send_ack(sk);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding unexpected seq %d",
					control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			l2cap_seq_list_clear(&pi->srej_list);
			l2cap_ertm_send_srej(sk, control->txseq);

			pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Already received; only the piggybacked reqseq
			 * is of interest.
			 */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
				pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_PREPARE) {
				/* Retransmit only when no AMP move is in
				 * progress.
				 */
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			l2cap_ertm_send_i_or_rr_or_rnr(sk);
		} else {
			/* Restart the retransmission timer if the peer
			 * just left the busy state with frames pending.
			 */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames)
				l2cap_ertm_start_retrans_timer(pi);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		/* Peer is busy: stop retransmitting until it recovers */
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control && control->poll) {
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_rr_or_rnr(sk, 0);
		}
		l2cap_ertm_stop_retrans_timer(pi);
		l2cap_seq_list_clear(&pi->retrans_list);
		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	default:
		break;
	}

	/* Free any skb whose ownership was not transferred above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6361
/* ERTM receive state machine: handler for the SREJ_SENT state.
 *
 * Entered after a sequence gap triggered one or more SREJ requests.
 * Out-of-order frames are parked on the SREJ queue; each retransmitted
 * frame that fills a gap lets l2cap_ertm_rx_queued_iframes() drain the
 * queue.  The queue-draining helper transitions back to RECV when all
 * gaps are filled.  The skb is consumed when queued (skb_in_use) and
 * freed otherwise.
 *
 * Returns 0 or a negative error from reassembly.
 */
static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;	/* set once skb ownership is transferred */

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->expected_tx_seq = __next_seq(txseq, pi);
			break;
		case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
			/* The awaited retransmission arrived; pop it from
			 * the SREJ list and try to drain the queue.
			 */
			l2cap_seq_list_pop(&pi->srej_list);

			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			err = l2cap_ertm_rx_queued_iframes(sk);
			if (err)
				break;

			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej_list(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			/* Restart retransmissions if the peer just left
			 * the busy state, then answer the poll with the
			 * tail of the SREJ list (F-bit set).
			 */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_srej_tail(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send_ack(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		/* Peer is busy: note it and answer polls with an RR */
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->poll)
			l2cap_ertm_send_srej_tail(sk);
		else {
			struct bt_l2cap_control rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.frame_type = 's';
			rr_control.super = L2CAP_SFRAME_RR;
			rr_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &rr_control);
		}

		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	}

	/* Free any skb whose ownership was not transferred above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6507
/* ERTM receive state machine: handler used during an AMP channel move.
 *
 * Processes only in-sequence traffic so the receive state does not
 * change mid-move: expected I-frames are reassembled, supervisory
 * frames merely advance the ack state, SREJs are ignored.  The skb is
 * freed unless reassembly consumed it.
 *
 * Returns 0 or a negative error from reassembly.
 */
static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;	/* set once skb ownership is transferred */

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	/* Only handle expected frames, to avoid state changes. */

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
				L2CAP_ERTM_TXSEQ_EXPECTED) {
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				/* Note: no retransmission here - the move
				 * handler takes care of the tx queue.
				 */
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else
					control->final = 0;
			}
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
	case L2CAP_ERTM_EVENT_RECV_RNR:
	case L2CAP_ERTM_EVENT_RECV_REJ:
		/* Only consume the acknowledgment; defer recovery */
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		/* Ignore */
		break;
	default:
		break;
	}

	/* Free any skb whose ownership was not transferred above */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6569
/* Answer the peer's poll that completes an AMP channel move.
 *
 * Consumes the peer's acknowledgment, rewinds the transmit sequence to
 * what the receiver expects, resegments traffic for the new transport,
 * responds with the F-bit set, and replays the saved receive event that
 * carried the poll.
 *
 * Returns 0 on success or a negative error.
 */
static int l2cap_answer_move_poll(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;
	int err = 0;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	/* Drop frames the peer has acknowledged up to amp_move_reqseq */
	l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));
	else
		sk->sk_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	pi->next_tx_seq = pi->amp_move_reqseq;
	pi->unacked_frames = 0;

	/* Re-fragment queued data for the new transport's MTU */
	err = l2cap_finish_amp_move(sk);

	if (err)
		return err;

	pi->conn_state |= L2CAP_CONN_SEND_FBIT;
	l2cap_ertm_send_i_or_rr_or_rnr(sk);

	memset(&control, 0, sizeof(control));
	control.reqseq = pi->amp_move_reqseq;

	/* A saved I-frame event here would mean a frame arrived while
	 * waiting for the poll - protocol violation.
	 */
	if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
		err = -EPROTO;
	else
		err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
						pi->amp_move_event);

	return err;
}
6612
6613static void l2cap_amp_move_setup(struct sock *sk)
6614{
6615 struct l2cap_pinfo *pi;
6616 struct sk_buff *skb;
6617
6618 BT_DBG("sk %p", sk);
6619
6620 pi = l2cap_pi(sk);
6621
6622 l2cap_ertm_stop_ack_timer(pi);
6623 l2cap_ertm_stop_retrans_timer(pi);
6624 l2cap_ertm_stop_monitor_timer(pi);
6625
6626 pi->retry_count = 0;
6627 skb_queue_walk(TX_QUEUE(sk), skb) {
6628 if (bt_cb(skb)->retries)
6629 bt_cb(skb)->retries = 1;
6630 else
6631 break;
6632 }
6633
6634 pi->expected_tx_seq = pi->buffer_seq;
6635
6636 pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
6637 l2cap_seq_list_clear(&pi->retrans_list);
6638 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
6639 skb_queue_purge(SREJ_QUEUE(sk));
6640
6641 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
6642 pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
6643
6644 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
6645 pi->rx_state);
6646
6647 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
6648}
6649
6650static void l2cap_amp_move_revert(struct sock *sk)
6651{
6652 struct l2cap_pinfo *pi;
6653
6654 BT_DBG("sk %p", sk);
6655
6656 pi = l2cap_pi(sk);
6657
6658 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6659 l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6660 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6661 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
6662 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
6663}
6664
6665static int l2cap_amp_move_reconf(struct sock *sk)
6666{
6667 struct l2cap_pinfo *pi;
6668 u8 buf[64];
6669 int err = 0;
6670
6671 BT_DBG("sk %p", sk);
6672
6673 pi = l2cap_pi(sk);
6674
6675 l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
6676 l2cap_build_amp_reconf_req(sk, buf), buf);
6677 return err;
6678}
6679
/* Finalize a successful AMP channel move.
 *
 * The initiator of an ERTM channel starts a reconfiguration exchange
 * (when enabled), falling back to an explicit poll on failure; the
 * responder waits for the initiator's poll.  Non-ERTM channels go
 * straight back to normal reception.
 */
static void l2cap_amp_move_success(struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		int err = 0;
		/* Send reconfigure request */
		if (pi->mode == L2CAP_MODE_ERTM) {
			pi->reconf_state = L2CAP_RECONF_INT;
			if (enable_reconfig)
				err = l2cap_amp_move_reconf(sk);

			/* Reconfig disabled or failed: poll the peer and
			 * wait for the F-bit response instead.
			 */
			if (err || !enable_reconfig) {
				pi->reconf_state = L2CAP_RECONF_NONE;
				l2cap_ertm_tx(sk, NULL, NULL,
					L2CAP_ERTM_EVENT_EXPLICIT_POLL);
				pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
			}
		} else
			pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	} else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		if (pi->mode == L2CAP_MODE_ERTM)
			pi->rx_state =
				L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
		else
			pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	}
}
6712
6713static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
6714{
6715 /* Make sure reqseq is for a packet that has been sent but not acked */
6716 u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
6717 return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
6718}
6719
/* Streaming-mode receive path.
 *
 * Streaming mode has no retransmission: the expected frame is passed
 * to reassembly, while any out-of-sequence frame aborts the partial
 * SDU and is dropped.  The sequence state is unconditionally advanced
 * past the received txseq.
 *
 * Always returns 0 (streaming mode has no recoverable errors here).
 */
static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, state %d",
		sk, control, skb, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
			L2CAP_ERTM_TXSEQ_EXPECTED) {
		l2cap_ertm_pass_to_tx(sk, control);

		BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
			__next_seq(pi->buffer_seq, pi));

		pi->buffer_seq = __next_seq(pi->buffer_seq, pi);

		l2cap_ertm_rx_expected_iframe(sk, control, skb);
	} else {
		/* Sequence gap: discard any partially assembled SDU */
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize on whatever frame actually arrived */
	pi->last_acked_seq = control->txseq;
	pi->expected_tx_seq = __next_seq(control->txseq, pi);

	return err;
}
6760
/* Top-level ERTM receive dispatcher.
 *
 * Validates the acknowledgment (reqseq) and routes the event to the
 * handler for the current receive state.  The WAIT_F_FLAG and
 * WAIT_P_FLAG states implement the tail end of an AMP channel move.
 * An invalid reqseq disconnects the channel.
 *
 * Returns 0 or a negative error from the state handler.
 */
static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
		sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (__valid_reqseq(pi, control->reqseq)) {
		switch (pi->rx_state) {
		case L2CAP_ERTM_RX_STATE_RECV:
			err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
			break;
		case L2CAP_ERTM_RX_STATE_SREJ_SENT:
			err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_AMP_MOVE:
			err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
			/* NOTE(review): when control->final is not set, skb
			 * appears to be neither consumed nor freed on this
			 * path - possible leak, confirm against callers.
			 */
			if (control->final) {
				pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;

				pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
				l2cap_ertm_process_reqseq(sk, control->reqseq);

				if (!skb_queue_empty(TX_QUEUE(sk)))
					sk->sk_send_head =
						skb_peek(TX_QUEUE(sk));
				else
					sk->sk_send_head = NULL;

				/* Rewind next_tx_seq to the point expected
				 * by the receiver.
				 */
				pi->next_tx_seq = control->reqseq;
				pi->unacked_frames = 0;

				/* Adopt the MTU of whichever controller now
				 * carries the channel.
				 */
				if (pi->ampcon)
					pi->conn->mtu =
						pi->ampcon->hdev->acl_mtu;
				else
					pi->conn->mtu =
						pi->conn->hcon->hdev->acl_mtu;

				err = l2cap_setup_resegment(sk);

				if (err)
					break;

				err = l2cap_ertm_rx_state_recv(sk, control, skb,
							event);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;
				err = l2cap_answer_move_poll(sk);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;

				BT_DBG("amp_move_role 0x%2.2x, "
					"reconf_state 0x%2.2x",
					pi->amp_move_role, pi->reconf_state);

				/* Answer with a reconfigure request when one
				 * was accepted, otherwise finish the move.
				 */
				if (pi->reconf_state == L2CAP_RECONF_ACC)
					err = l2cap_amp_move_reconf(sk);
				else
					err = l2cap_answer_move_poll(sk);
			}
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
			control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	}

	return err;
}
6855
6856void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
6857{
6858 lock_sock(sk);
6859
6860 l2cap_pi(sk)->fixed_channel = 1;
6861
6862 l2cap_pi(sk)->imtu = opt->imtu;
6863 l2cap_pi(sk)->omtu = opt->omtu;
6864 l2cap_pi(sk)->remote_mps = opt->omtu;
6865 l2cap_pi(sk)->mps = opt->omtu;
6866 l2cap_pi(sk)->flush_to = opt->flush_to;
6867 l2cap_pi(sk)->mode = opt->mode;
6868 l2cap_pi(sk)->fcs = opt->fcs;
6869 l2cap_pi(sk)->max_tx = opt->max_tx;
6870 l2cap_pi(sk)->remote_max_tx = opt->max_tx;
6871 l2cap_pi(sk)->tx_win = opt->txwin_size;
6872 l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
6873 l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
6874 l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
6875
6876 if (opt->mode == L2CAP_MODE_ERTM ||
6877 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
6878 l2cap_ertm_init(sk);
6879
6880 release_sock(sk);
6881
6882 return;
6883}
6884
/* Map the 2-bit supervisory function field of a received S-frame
 * (RR=0, REJ=1, RNR=2, SREJ=3) to the corresponding ERTM rx event.
 * Indexed by control->super in l2cap_data_channel().
 */
static const u8 l2cap_ertm_rx_func_to_event[4] = {
	L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
	L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
};
6889
/* Entry point for data received on a connection-oriented channel.
 *
 * Basic mode frames are queued directly to the socket.  For ERTM and
 * streaming modes the control field is parsed (extended or enhanced
 * format), the FCS and payload length are validated, and the frame is
 * dispatched to the ERTM or streaming receive machinery.  The skb is
 * either consumed by the receive path or freed via the drop label.
 *
 * Always returns 0.
 */
int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control *control;
	u16 len;
	u8 event;
	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		/* Parse and strip the control field (4 bytes extended,
		 * 2 bytes enhanced).
		 */
		control = &bt_cb(skb)->control;
		if (pi->extended_control) {
			__get_extended_control(get_unaligned_le32(skb->data),
						control);
			skb_pull(skb, 4);
		} else {
			__get_enhanced_control(get_unaligned_le16(skb->data),
						control);
			skb_pull(skb, 2);
		}

		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* A start-of-SDU I-frame carries a 2-byte SDU length */
		if ((control->frame_type == 'i') &&
			(control->sar == L2CAP_SAR_START))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask for retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (control->frame_type == 'i') {

			int err;

			BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
				control->sar, control->reqseq, control->final,
				control->txseq);

			/* Validate F-bit - F=0 always valid, F=1 only
			 * valid in TX WAIT_F
			 */
			if (control->final && (pi->tx_state !=
					L2CAP_ERTM_TX_STATE_WAIT_F))
				goto drop;

			if (pi->mode != L2CAP_MODE_STREAMING) {
				event = L2CAP_ERTM_EVENT_RECV_IFRAME;
				err = l2cap_ertm_rx(sk, control, skb, event);
			} else
				err = l2cap_strm_rx(sk, control, skb);
			if (err)
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		} else {
			/* Only I-frames are expected in streaming mode */
			if (pi->mode == L2CAP_MODE_STREAMING)
				goto drop;

			BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
				control->reqseq, control->final, control->poll,
				control->super);

			/* S-frames carry no payload */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
				goto drop;
			}

			/* Validate F and P bits */
			if (control->final &&
				((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
					|| control->poll))
				goto drop;

			event = l2cap_ertm_rx_func_to_event[control->super];
			if (l2cap_ertm_rx(sk, control, skb, event))
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		}

		/* skb ownership passed to the rx machinery - don't free */
		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	return 0;
}
7015
/* Process a deferred inbound frame with the socket lock held.
 * Sleepable-context wrapper around l2cap_data_channel().
 */
void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	l2cap_data_channel(sk, skb);
	release_sock(sk);
}
7022
/* Deliver a connectionless (G-frame) payload to the socket bound to
 * the given PSM, if one exists and can accept it.  The skb is freed
 * unless it was queued.  Always returns 0.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	/* sk may be NULL when the PSM lookup failed; the lock was only
	 * taken when a socket was found.
	 */
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
7052
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007053static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
7054{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007055 struct sock *sk;
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007056
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007057 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
7058 if (!sk)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007059 goto drop;
7060
7061 bh_lock_sock(sk);
7062
7063 BT_DBG("sk %p, len %d", sk, skb->len);
7064
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007065 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007066 goto drop;
7067
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007068 if (l2cap_pi(sk)->imtu < skb->len)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007069 goto drop;
7070
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007071 if (!sock_queue_rcv_skb(sk, skb))
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007072 goto done;
7073
7074drop:
7075 kfree_skb(skb);
7076
7077done:
7078 if (sk)
7079 bh_unlock_sock(sk);
7080 return 0;
7081}
7082
Linus Torvalds1da177e2005-04-16 15:20:36 -07007083static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7084{
7085 struct l2cap_hdr *lh = (void *) skb->data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007086 struct sock *sk;
Al Viro8e036fc2007-07-29 00:16:36 -07007087 u16 cid, len;
7088 __le16 psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007089
7090 skb_pull(skb, L2CAP_HDR_SIZE);
7091 cid = __le16_to_cpu(lh->cid);
7092 len = __le16_to_cpu(lh->len);
7093
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03007094 if (len != skb->len) {
7095 kfree_skb(skb);
7096 return;
7097 }
7098
Linus Torvalds1da177e2005-04-16 15:20:36 -07007099 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7100
7101 switch (cid) {
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02007102 case L2CAP_CID_LE_SIGNALING:
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03007103 case L2CAP_CID_SIGNALING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007104 l2cap_sig_channel(conn, skb);
7105 break;
7106
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03007107 case L2CAP_CID_CONN_LESS:
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03007108 psm = get_unaligned_le16(skb->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007109 skb_pull(skb, 2);
7110 l2cap_conless_channel(conn, psm, skb);
7111 break;
7112
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007113 case L2CAP_CID_LE_DATA:
7114 l2cap_att_channel(conn, cid, skb);
7115 break;
7116
Anderson Brigliaea370122011-06-07 18:46:31 -03007117 case L2CAP_CID_SMP:
7118 if (smp_sig_channel(conn, skb))
7119 l2cap_conn_del(conn->hcon, EACCES);
7120 break;
7121
Linus Torvalds1da177e2005-04-16 15:20:36 -07007122 default:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007123 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
7124 if (sk) {
7125 if (sock_owned_by_user(sk)) {
7126 BT_DBG("backlog sk %p", sk);
7127 if (sk_add_backlog(sk, skb))
7128 kfree_skb(skb);
7129 } else
7130 l2cap_data_channel(sk, skb);
7131
7132 bh_unlock_sock(sk);
7133 } else if (cid == L2CAP_CID_A2MP) {
7134 BT_DBG("A2MP");
7135 amp_conn_ind(conn, skb);
7136 } else {
7137 BT_DBG("unknown cid 0x%4.4x", cid);
7138 kfree_skb(skb);
7139 }
7140
Linus Torvalds1da177e2005-04-16 15:20:36 -07007141 break;
7142 }
7143}
7144
7145/* ---- L2CAP interface with lower layer (HCI) ---- */
7146
7147static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
7148{
7149 int exact = 0, lm1 = 0, lm2 = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007150 register struct sock *sk;
7151 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007152
7153 if (type != ACL_LINK)
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007154 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007155
7156 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
7157
7158 /* Find listening sockets and check their link_mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007159 read_lock(&l2cap_sk_list.lock);
7160 sk_for_each(sk, node, &l2cap_sk_list.head) {
7161 if (sk->sk_state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007162 continue;
7163
7164 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007165 lm1 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007166 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007167 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007168 exact++;
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007169 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
7170 lm2 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007171 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007172 lm2 |= HCI_LM_MASTER;
7173 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007174 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007175 read_unlock(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007176
7177 return exact ? lm1 : lm2;
7178}
7179
7180static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7181{
Marcel Holtmann01394182006-07-03 10:02:46 +02007182 struct l2cap_conn *conn;
7183
Linus Torvalds1da177e2005-04-16 15:20:36 -07007184 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
7185
Ville Tervoacd7d372011-02-10 22:38:49 -03007186 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007187 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007188
7189 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007190 conn = l2cap_conn_add(hcon, status);
7191 if (conn)
7192 l2cap_conn_ready(conn);
Marcel Holtmann01394182006-07-03 10:02:46 +02007193 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007194 l2cap_conn_del(hcon, bt_err(status));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007195
7196 return 0;
7197}
7198
Marcel Holtmann2950f212009-02-12 14:02:50 +01007199static int l2cap_disconn_ind(struct hci_conn *hcon)
7200{
7201 struct l2cap_conn *conn = hcon->l2cap_data;
7202
7203 BT_DBG("hcon %p", hcon);
7204
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007205 if (hcon->type != ACL_LINK || !conn)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007206 return 0x13;
7207
7208 return conn->disc_reason;
7209}
7210
7211static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007212{
7213 BT_DBG("hcon %p reason %d", hcon, reason);
7214
Ville Tervoacd7d372011-02-10 22:38:49 -03007215 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007216 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007217
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007218 l2cap_conn_del(hcon, bt_err(reason));
Marcel Holtmann01394182006-07-03 10:02:46 +02007219
Linus Torvalds1da177e2005-04-16 15:20:36 -07007220 return 0;
7221}
7222
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007223static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007224{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007225 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007226 return;
7227
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007228 if (encrypt == 0x00) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007229 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
7230 l2cap_sock_clear_timer(sk);
7231 l2cap_sock_set_timer(sk, HZ * 5);
7232 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
7233 __l2cap_sock_close(sk, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007234 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007235 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
7236 l2cap_sock_clear_timer(sk);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007237 }
7238}
7239
Marcel Holtmann8c1b2352009-01-15 21:58:04 +01007240static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007241{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007242 struct l2cap_chan_list *l;
Marcel Holtmann40be4922008-07-14 20:13:50 +02007243 struct l2cap_conn *conn = hcon->l2cap_data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007244 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007245
Marcel Holtmann01394182006-07-03 10:02:46 +02007246 if (!conn)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007247 return 0;
Marcel Holtmann01394182006-07-03 10:02:46 +02007248
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007249 l = &conn->chan_list;
7250
Linus Torvalds1da177e2005-04-16 15:20:36 -07007251 BT_DBG("conn %p", conn);
7252
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007253 read_lock(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007254
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007255 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007256 bh_lock_sock(sk);
7257
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007258 BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);
Vinicius Costa Gomesa5474a82011-01-26 21:42:57 -03007259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007260 if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
Brian Gixa68668b2011-08-11 15:49:36 -07007261 if (!status && encrypt)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007262 l2cap_pi(sk)->sec_level = hcon->sec_level;
Brian Gixa68668b2011-08-11 15:49:36 -07007263
7264 del_timer(&conn->security_timer);
7265 l2cap_chan_ready(sk);
7266 smp_link_encrypt_cmplt(conn, status, encrypt);
Vinicius Costa Gomesa5474a82011-01-26 21:42:57 -03007267
7268 bh_unlock_sock(sk);
7269 continue;
7270 }
7271
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007272 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
Marcel Holtmann6a8d3012009-02-06 23:56:36 +01007273 bh_unlock_sock(sk);
7274 continue;
7275 }
7276
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007277 if (!status && (sk->sk_state == BT_CONNECTED ||
7278 sk->sk_state == BT_CONFIG)) {
7279 l2cap_check_encryption(sk, encrypt);
Marcel Holtmann9719f8a2008-07-14 20:13:45 +02007280 bh_unlock_sock(sk);
7281 continue;
7282 }
7283
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007284 if (sk->sk_state == BT_CONNECT) {
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007285 if (!status) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007286 l2cap_pi(sk)->conf_state |=
7287 L2CAP_CONF_CONNECT_PEND;
7288 if (l2cap_pi(sk)->amp_pref ==
7289 BT_AMP_POLICY_PREFER_AMP) {
7290 amp_create_physical(l2cap_pi(sk)->conn,
7291 sk);
7292 } else
7293 l2cap_send_conn_req(sk);
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007294 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007295 l2cap_sock_clear_timer(sk);
7296 l2cap_sock_set_timer(sk, HZ / 10);
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007297 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007298 } else if (sk->sk_state == BT_CONNECT2) {
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007299 struct l2cap_conn_rsp rsp;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007300 __u16 result;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007301
7302 if (!status) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007303 if (l2cap_pi(sk)->amp_id) {
7304 amp_accept_physical(conn,
7305 l2cap_pi(sk)->amp_id, sk);
7306 bh_unlock_sock(sk);
7307 continue;
Johan Hedbergdf3c3932011-06-14 12:48:19 +03007308 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007309
7310 sk->sk_state = BT_CONFIG;
7311 result = L2CAP_CR_SUCCESS;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007312 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007313 sk->sk_state = BT_DISCONN;
7314 l2cap_sock_set_timer(sk, HZ / 10);
7315 result = L2CAP_CR_SEC_BLOCK;
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007316 }
7317
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007318 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
7319 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
7320 rsp.result = cpu_to_le16(result);
7321 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
7322 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
7323 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007324 }
7325
Linus Torvalds1da177e2005-04-16 15:20:36 -07007326 bh_unlock_sock(sk);
7327 }
7328
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007329 read_unlock(&l->lock);
Marcel Holtmannb1235d72008-07-14 20:13:54 +02007330
Linus Torvalds1da177e2005-04-16 15:20:36 -07007331 return 0;
7332}
7333
/* HCI callback: one ACL fragment arrived. Reassemble fragments into a
 * complete L2CAP frame in conn->rx_skb, then hand the frame to
 * l2cap_recv_frame(). The incoming skb is always consumed (forwarded
 * whole, copied into the reassembly buffer, or freed at 'drop'). */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	/* BR/EDR data may arrive before a connect event; create the
	 * connection state on demand. */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A new start fragment while reassembly is in progress
		 * means the previous frame was truncated: discard it. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		/* ACL_CONT set on a start fragment is a contradiction. */
		if (flags & ACL_CONT) {
			BT_ERR("Complete frame is incomplete "
					"(len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Lookup returns the sock bh-locked; unlock on all paths. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		/* Reject frames larger than the channel's incoming MTU
		 * before committing a reassembly buffer. */
		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
				len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation without a pending start fragment. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Overflowing fragment: abandon the whole reassembly. */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
7452
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007453static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007454{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007455 struct sock *sk;
7456 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007457
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007458 read_lock_bh(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007459
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007460 sk_for_each(sk, node, &l2cap_sk_list.head) {
7461 struct l2cap_pinfo *pi = l2cap_pi(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007462
Gustavo F. Padovan903d3432011-02-10 14:16:06 -02007463 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007464 batostr(&bt_sk(sk)->src),
7465 batostr(&bt_sk(sk)->dst),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007466 sk->sk_state, __le16_to_cpu(pi->psm),
7467 pi->scid, pi->dcid,
7468 pi->imtu, pi->omtu, pi->sec_level,
7469 pi->mode);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007470 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007471
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007472 read_unlock_bh(&l2cap_sk_list.lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007473
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007474 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007475}
7476
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007477static int l2cap_debugfs_open(struct inode *inode, struct file *file)
7478{
7479 return single_open(file, l2cap_debugfs_show, inode->i_private);
7480}
7481
/* File operations backing the debugfs "l2cap" entry (seq_file based). */
static const struct file_operations l2cap_debugfs_fops = {
	.open = l2cap_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Dentry of the debugfs entry; NULL when debugfs is unavailable. */
static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007490
Linus Torvalds1da177e2005-04-16 15:20:36 -07007491static struct hci_proto l2cap_hci_proto = {
7492 .name = "L2CAP",
7493 .id = HCI_PROTO_L2CAP,
7494 .connect_ind = l2cap_connect_ind,
7495 .connect_cfm = l2cap_connect_cfm,
7496 .disconn_ind = l2cap_disconn_ind,
Marcel Holtmann2950f212009-02-12 14:02:50 +01007497 .disconn_cfm = l2cap_disconn_cfm,
Marcel Holtmann8c1b2352009-01-15 21:58:04 +01007498 .security_cfm = l2cap_security_cfm,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007499 .recv_acldata = l2cap_recv_acldata,
7500 .create_cfm = l2cap_create_cfm,
7501 .modify_cfm = l2cap_modify_cfm,
7502 .destroy_cfm = l2cap_destroy_cfm,
Linus Torvalds1da177e2005-04-16 15:20:36 -07007503};
7504
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007505int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007506{
7507 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007508
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007509 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007510 if (err < 0)
7511 return err;
7512
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007513 _l2cap_wq = create_singlethread_workqueue("l2cap");
7514 if (!_l2cap_wq) {
7515 err = -ENOMEM;
7516 goto error;
7517 }
7518
Linus Torvalds1da177e2005-04-16 15:20:36 -07007519 err = hci_register_proto(&l2cap_hci_proto);
7520 if (err < 0) {
7521 BT_ERR("L2CAP protocol registration failed");
7522 bt_sock_unregister(BTPROTO_L2CAP);
7523 goto error;
7524 }
7525
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007526 if (bt_debugfs) {
7527 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
7528 bt_debugfs, NULL, &l2cap_debugfs_fops);
7529 if (!l2cap_debugfs)
7530 BT_ERR("Failed to create L2CAP debug file");
7531 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007532
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007533 if (amp_init() < 0) {
7534 BT_ERR("AMP Manager initialization failed");
7535 goto error;
7536 }
7537
Linus Torvalds1da177e2005-04-16 15:20:36 -07007538 return 0;
7539
7540error:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007541 destroy_workqueue(_l2cap_wq);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007542 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007543 return err;
7544}
7545
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007546void l2cap_exit(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007547{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007548 amp_exit();
7549
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007550 debugfs_remove(l2cap_debugfs);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007551
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007552 flush_workqueue(_l2cap_wq);
7553 destroy_workqueue(_l2cap_wq);
7554
Linus Torvalds1da177e2005-04-16 15:20:36 -07007555 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
7556 BT_ERR("L2CAP protocol unregistration failed");
7557
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007558 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007559}
7560
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03007561module_param(disable_ertm, bool, 0644);
7562MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007563
7564module_param(enable_reconfig, bool, 0644);
7565MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");