blob: e753c5f8788072475ab2f6ce5e6408d114d30342 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Gustavo F. Padovance5706b2010-07-13 11:57:11 -03004 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Gustavo F. Padovan5d8868f2010-07-16 16:18:39 -03005 Copyright (C) 2010 Google Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090017 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090022 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 SOFTWARE IS DISCLAIMED.
25*/
26
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020027/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30
31#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080032#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/list.h>
Marcel Holtmannbe9d1222005-11-08 09:57:38 -080044#include <linux/device.h>
Marcel Holtmannaef7d972010-03-21 05:27:45 +010045#include <linux/debugfs.h>
46#include <linux/seq_file.h>
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -030047#include <linux/uaccess.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030048#include <linux/crc16.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#include <linux/math64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <net/sock.h>
51
52#include <asm/system.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
Anderson Brigliaea370122011-06-07 18:46:31 -030058#include <net/bluetooth/smp.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059#include <net/bluetooth/amp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
/* Module parameters: set non-zero to disable ERTM mode / enable channel
 * reconfiguration support. */
int disable_ertm;
int enable_reconfig;

/* Locally supported L2CAP feature mask and fixed-channel bitmap
 * (advertised in Information Response; includes A2MP for AMP support). */
static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_A2MP, };

/* Workqueue used for ERTM ack/retransmission/monitor delayed work. */
struct workqueue_struct *_l2cap_wq;

/* Global list of all L2CAP sockets. */
struct bt_sock_list l2cap_sk_list = {
	.lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
};

/* Forward declarations: AMP channel-move signalling helpers. */
static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
			struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
			struct l2cap_pinfo *pi, u16 icid, u16 result);
static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
			u16 icid, u16 result);

/* Forward declarations: AMP move state transitions. */
static void l2cap_amp_move_setup(struct sock *sk);
static void l2cap_amp_move_success(struct sock *sk);
static void l2cap_amp_move_revert(struct sock *sk);

static int l2cap_ertm_rx_queued_iframes(struct sock *sk);

/* Forward declarations: command building, AMP confirm/deaggregate,
 * channel/connection lifecycle. */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
				u8 code, u8 ident, u16 dlen, void *data);
static int l2cap_answer_move_poll(struct sock *sk);
static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
static void l2cap_chan_ready(struct sock *sk);
static void l2cap_conn_del(struct hci_conn *hcon, int err);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -030093
Marcel Holtmann01394182006-07-03 10:02:46 +020094/* ---- L2CAP channels ---- */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
Gustavo F. Padovan4a6aa522011-05-17 14:34:52 -030096{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 struct sock *s;
98 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
99 if (l2cap_pi(s)->dcid == cid)
100 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200101 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700102 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200103}
104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700105/* Find channel with given DCID.
106 * Returns locked socket */
107static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
108 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200109{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700110 struct sock *s;
111 read_lock(&l->lock);
112 s = __l2cap_get_chan_by_dcid(l, cid);
113 if (s)
114 bh_lock_sock(s);
115 read_unlock(&l->lock);
116 return s;
117}
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120{
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->scid == cid)
124 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200125 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200127}
128
129/* Find channel with given SCID.
130 * Returns locked socket */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700131static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200132{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700133 struct sock *s;
134 read_lock(&l->lock);
135 s = __l2cap_get_chan_by_scid(l, cid);
136 if (s)
137 bh_lock_sock(s);
138 read_unlock(&l->lock);
139 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200140}
141
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200143{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
147 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200148 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149 return s;
150}
151
152static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153{
154 struct sock *s;
155 read_lock(&l->lock);
156 s = __l2cap_get_chan_by_ident(l, ident);
157 if (s)
158 bh_lock_sock(s);
159 read_unlock(&l->lock);
160 return s;
161}
162
/* Linear-scan an skb queue for the frame carrying ERTM TxSeq 'seq'.
 * Returns the skb if found, NULL otherwise.  Does not dequeue. */
static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
						u16 seq)
{
	struct sk_buff *skb;

	skb_queue_walk(head, skb) {
		if (bt_cb(skb)->control.txseq == seq)
			return skb;
	}

	return NULL;
}
175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
Marcel Holtmann01394182006-07-03 10:02:46 +0200177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 u16 allocSize = 1;
179 int err = 0;
180 int i;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300181
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182 /* Actual allocated size must be a power of 2 */
183 while (allocSize && allocSize <= size)
184 allocSize <<= 1;
185 if (!allocSize)
186 return -ENOMEM;
Marcel Holtmann01394182006-07-03 10:02:46 +0200187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188 seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
189 if (!seq_list->list)
190 return -ENOMEM;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700192 seq_list->size = allocSize;
193 seq_list->mask = allocSize - 1;
194 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
195 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
196 for (i = 0; i < allocSize; i++)
197 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300198
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300199 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300200}
201
/* Release the backing array of a sequence list.  Safe to call if init
 * failed or never ran, since kfree(NULL) is a no-op. */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
206
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
208 u16 seq)
209{
210 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
211}
212
/* Remove 'seq' from the list and return it, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty or 'seq' is not present.
 *
 * The list is singly linked through the array: list[x & mask] holds the
 * sequence number following x, L2CAP_SEQ_LIST_TAIL for the last entry,
 * or L2CAP_SEQ_LIST_CLEAR for entries not in the list.  Removing a
 * non-head entry therefore requires walking from the head to find its
 * predecessor.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		BT_DBG("List empty");
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed quickly */
		BT_DBG("Remove head");
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* Removing the only element empties the list entirely. */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Non-head item must be found first */
		u16 prev = seq_list->head;
		BT_DBG("Find and remove");
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL) {
				BT_DBG("seq %d not in list", (int) seq);
				return L2CAP_SEQ_LIST_CLEAR;
			}
		}

		/* Splice 'seq' out: predecessor inherits its successor. */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
252
/* Remove and return the head of the list, or L2CAP_SEQ_LIST_CLEAR if
 * the list is empty (handled inside l2cap_seq_list_remove). */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
257
258static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
259{
260 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
261 u16 i;
262 for (i = 0; i < seq_list->size; i++)
263 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
264
265 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
266 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
267 }
268}
269
/* Append 'seq' at the tail of the list.  A sequence number already in
 * the list (slot != CLEAR) is silently ignored, keeping the linked
 * structure consistent. */
static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
		/* Empty list: new entry becomes the head too. */
		if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
			seq_list->head = seq;
		else
			seq_list->list[seq_list->tail & mask] = seq;

		seq_list->tail = seq;
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
	}
}
286
287static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
288{
289 u16 packed;
290
291 packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
292 L2CAP_CTRL_REQSEQ;
293 packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
294 L2CAP_CTRL_FINAL;
295
296 if (control->frame_type == 's') {
297 packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
298 L2CAP_CTRL_POLL;
299 packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
300 L2CAP_CTRL_SUPERVISE;
301 packed |= L2CAP_CTRL_FRAME_TYPE;
302 } else {
303 packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
304 L2CAP_CTRL_SAR;
305 packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
306 L2CAP_CTRL_TXSEQ;
307 }
308
309 return packed;
310}
311
312static void __get_enhanced_control(u16 enhanced,
313 struct bt_l2cap_control *control)
314{
315 control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
316 L2CAP_CTRL_REQSEQ_SHIFT;
317 control->final = (enhanced & L2CAP_CTRL_FINAL) >>
318 L2CAP_CTRL_FINAL_SHIFT;
319
320 if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
321 control->frame_type = 's';
322 control->poll = (enhanced & L2CAP_CTRL_POLL) >>
323 L2CAP_CTRL_POLL_SHIFT;
324 control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
325 L2CAP_CTRL_SUPERVISE_SHIFT;
326
327 control->sar = 0;
328 control->txseq = 0;
329 } else {
330 control->frame_type = 'i';
331 control->sar = (enhanced & L2CAP_CTRL_SAR) >>
332 L2CAP_CTRL_SAR_SHIFT;
333 control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
334 L2CAP_CTRL_TXSEQ_SHIFT;
335
336 control->poll = 0;
337 control->super = 0;
338 }
339}
340
341static u32 __pack_extended_control(struct bt_l2cap_control *control)
342{
343 u32 packed;
344
345 packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
346 L2CAP_EXT_CTRL_REQSEQ;
347 packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
348 L2CAP_EXT_CTRL_FINAL;
349
350 if (control->frame_type == 's') {
351 packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
352 L2CAP_EXT_CTRL_POLL;
353 packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
354 L2CAP_EXT_CTRL_SUPERVISE;
355 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
356 } else {
357 packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
358 L2CAP_EXT_CTRL_SAR;
359 packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
360 L2CAP_EXT_CTRL_TXSEQ;
361 }
362
363 return packed;
364}
365
366static void __get_extended_control(u32 extended,
367 struct bt_l2cap_control *control)
368{
369 control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
370 L2CAP_EXT_CTRL_REQSEQ_SHIFT;
371 control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
372 L2CAP_EXT_CTRL_FINAL_SHIFT;
373
374 if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
375 control->frame_type = 's';
376 control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
377 L2CAP_EXT_CTRL_POLL_SHIFT;
378 control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
379 L2CAP_EXT_CTRL_SUPERVISE_SHIFT;
380
381 control->sar = 0;
382 control->txseq = 0;
383 } else {
384 control->frame_type = 'i';
385 control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
386 L2CAP_EXT_CTRL_SAR_SHIFT;
387 control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
388 L2CAP_EXT_CTRL_TXSEQ_SHIFT;
389
390 control->poll = 0;
391 control->super = 0;
392 }
393}
394
/* Cancel any pending ERTM acknowledgment work. */
static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->ack_work);
}
400
/* Arm the ERTM ack timer unless it is already pending; an armed timer
 * is deliberately left running rather than being restarted. */
static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
	if (!delayed_work_pending(&pi->ack_work)) {
		queue_delayed_work(_l2cap_wq, &pi->ack_work,
				msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
	}
}
409
/* Cancel any pending ERTM retransmission work. */
static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->retrans_work);
}
415
/* (Re)arm the ERTM retransmission timer.  Skipped while the monitor
 * timer is pending (monitor supersedes retransmission) or when no
 * retransmission timeout is configured. */
static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
		__cancel_delayed_work(&pi->retrans_work);
		queue_delayed_work(_l2cap_wq, &pi->retrans_work,
			msecs_to_jiffies(pi->retrans_timeout));
	}
}
425
/* Cancel any pending ERTM monitor work. */
static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->monitor_work);
}
431
/* Start the ERTM monitor timer; the retransmission timer is stopped
 * first since the two are mutually exclusive.  A zero monitor_timeout
 * leaves the timer disarmed. */
static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	l2cap_ertm_stop_retrans_timer(pi);
	__cancel_delayed_work(&pi->monitor_work);
	if (pi->monitor_timeout) {
		queue_delayed_work(_l2cap_wq, &pi->monitor_work,
				msecs_to_jiffies(pi->monitor_timeout));
	}
}
442
443static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
Marcel Holtmann01394182006-07-03 10:02:46 +0200444{
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300445 u16 cid = L2CAP_CID_DYN_START;
Marcel Holtmann01394182006-07-03 10:02:46 +0200446
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300447 for (; cid < L2CAP_CID_DYN_END; cid++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700448 if (!__l2cap_get_chan_by_scid(l, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200449 return cid;
450 }
451
452 return 0;
453}
454
/* Push the socket onto the head of the connection's channel list,
 * taking a socket reference for the list's membership.  Caller handles
 * locking (see the unlink counterpart which takes the write lock). */
static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
{
	sock_hold(sk);

	if (l->head)
		l2cap_pi(l->head)->prev_c = sk;

	l2cap_pi(sk)->next_c = l->head;
	l2cap_pi(sk)->prev_c = NULL;
	l->head = sk;
}
466
/* Remove the socket from the connection's doubly linked channel list
 * under the list write lock, then drop the reference that
 * __l2cap_chan_link() took. */
static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
{
	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;

	write_lock_bh(&l->lock);
	if (sk == l->head)
		l->head = next;

	if (next)
		l2cap_pi(next)->prev_c = prev;
	if (prev)
		l2cap_pi(prev)->next_c = next;
	write_unlock_bh(&l->lock);

	__sock_put(sk);
}
483
/* Attach a channel socket to an L2CAP connection: assign CIDs and MTUs
 * according to the socket type, then link it into the connection's
 * channel list. */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* 0x13: default disconnect reason reported if the link drops
	 * (presumably "remote user terminated" — TODO confirm). */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (!l2cap_pi(sk)->fixed_channel &&
		(sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: fixed LE data CID, and MTUs are
			 * raised to at least the LE default. */
			if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
			if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;

			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else if (sk->sk_type == SOCK_RAW) {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}
	/* Otherwise, do not set scid/dcid/omtu. These will be set up
	 * by l2cap_fixed_channel_config()
	 */

	__l2cap_chan_link(l, sk);
}
528
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Detaches the channel from its L2CAP connection and any AMP
 * connection/channel, marks the socket closed/zapped (recording 'err'
 * if non-zero), notifies the accept parent or the socket itself, and
 * purges queued data plus ERTM state. */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Fixed channels do not hold an hcon reference of their
		 * own, so only non-fixed channels drop one here. */
		if (!l2cap_pi(sk)->fixed_channel)
			hci_conn_put(conn->hcon);
	}

	/* Tear down AMP state: release the AMP channel reference and, if
	 * others still hold it, deaggregate this channel from it. */
	if (l2cap_pi(sk)->ampcon) {
		l2cap_pi(sk)->ampcon->l2cap_data = NULL;
		l2cap_pi(sk)->ampcon = NULL;
		if (l2cap_pi(sk)->ampchan) {
			hci_chan_put(l2cap_pi(sk)->ampchan);
			if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
				l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
		}
		l2cap_pi(sk)->ampchan = NULL;
		l2cap_pi(sk)->amp_id = 0;
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	/* Accepted-but-unclaimed children are unlinked from the parent;
	 * otherwise wake anyone waiting on this socket's state. */
	if (parent) {
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	/* ERTM mode keeps extra reassembly/SREJ state and three delayed
	 * work items that must not fire after deletion. */
	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		if (l2cap_pi(sk)->sdu)
			kfree_skb(l2cap_pi(sk)->sdu);

		skb_queue_purge(SREJ_QUEUE(sk));

		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}
}
586
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700587static inline u8 l2cap_get_auth_type(struct sock *sk)
Gustavo F. Padovan6e9e43f2011-04-28 17:55:53 -0300588{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700589 if (sk->sk_type == SOCK_RAW) {
590 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530591 case BT_SECURITY_HIGH:
592 return HCI_AT_DEDICATED_BONDING_MITM;
593 case BT_SECURITY_MEDIUM:
594 return HCI_AT_DEDICATED_BONDING;
595 default:
596 return HCI_AT_NO_BONDING;
597 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700598 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
599 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
600 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
Johan Hedberg8556edd32011-01-19 12:06:50 +0530601
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700602 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
Johan Hedberg8556edd32011-01-19 12:06:50 +0530603 return HCI_AT_NO_BONDING_MITM;
604 else
605 return HCI_AT_NO_BONDING;
606 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700607 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530608 case BT_SECURITY_HIGH:
609 return HCI_AT_GENERAL_BONDING_MITM;
610 case BT_SECURITY_MEDIUM:
611 return HCI_AT_GENERAL_BONDING;
612 default:
613 return HCI_AT_NO_BONDING;
614 }
615 }
616}
617
/* Service level security */
/* Request link security matching the channel's security level; returns
 * the result of hci_conn_security() (non-zero when the link already
 * satisfies the requirement — TODO confirm exact contract). */
static inline int l2cap_check_security(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	__u8 auth_type;

	auth_type = l2cap_get_auth_type(sk);

	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
								auth_type);
}
629
/* Allocate the next signalling command identifier for this connection,
 * cycling through 1..128 under the connection spinlock. */
u8 l2cap_get_ident(struct l2cap_conn *conn)
{
	u8 id;

	/* Get next available identificator.
	 * 1 - 128 are used by kernel.
	 * 129 - 199 are reserved.
	 * 200 - 254 are used by utilities like l2ping, etc.
	 */

	spin_lock_bh(&conn->lock);

	if (++conn->tx_ident > 128)
		conn->tx_ident = 1;

	id = conn->tx_ident;

	spin_unlock_bh(&conn->lock);

	return id;
}
651
/* Compute the CRC-16 FCS over the frame (head plus any fragment list)
 * and write it, little-endian, into the last L2CAP_FCS_SIZE bytes of
 * the final fragment.  The skb must already reserve room for the FCS. */
static void apply_fcs(struct sk_buff *skb)
{
	size_t len;
	u16 partial_crc;
	struct sk_buff *iter;
	struct sk_buff *final_frag = skb;

	/* With a fragment list the FCS bytes live in the last fragment,
	 * so the head contributes all of its data; otherwise the FCS
	 * tail of the head itself is excluded from the checksum. */
	if (skb_has_frag_list(skb))
		len = skb_headlen(skb);
	else
		len = skb->len - L2CAP_FCS_SIZE;

	partial_crc = crc16(0, (u8 *) skb->data, len);

	skb_walk_frags(skb, iter) {
		len = iter->len;
		if (!iter->next)
			len -= L2CAP_FCS_SIZE;

		partial_crc = crc16(partial_crc, iter->data, len);
		final_frag = iter;
	}

	put_unaligned_le16(partial_crc,
			final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
}
678
/* Build and transmit an L2CAP signalling command over the ACL link.
 * Silently returns if the command skb cannot be built.  Marks the ACL
 * start flag no-flush when the controller supports it. */
void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
{
	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
	u8 flags;

	BT_DBG("code 0x%2.2x", code);

	if (!skb)
		return;

	if (lmp_no_flush_capable(conn->hcon->hdev))
		flags = ACL_START_NO_FLUSH;
	else
		flags = ACL_START;

	/* Signalling traffic keeps the link active (no sniff mode). */
	bt_cb(skb)->force_active = 1;

	hci_send_acl(conn->hcon, NULL, skb, flags);
}
698
/* True when no Connection Request is outstanding for this channel. */
static inline int __l2cap_no_conn_pending(struct sock *sk)
{
	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
}
703
/* Send an L2CAP Connection Request carrying the channel's SCID and PSM,
 * remembering the allocated identifier for matching the response. */
static void l2cap_send_conn_req(struct sock *sk)
{
	struct l2cap_conn_req req;
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	req.psm = l2cap_pi(sk)->psm;

	l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);

	l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
			L2CAP_CONN_REQ, sizeof(req), &req);
}
715
/* Send an L2CAP Create Channel Request targeting the given AMP
 * controller, enabling lockstep configuration for the channel and
 * remembering the allocated identifier for the response. */
static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
{
	struct l2cap_create_chan_req req;
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	req.psm = l2cap_pi(sk)->psm;
	req.amp_id = amp_id;

	l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
	l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);

	l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
			L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
}
729
/* Kick off channel establishment.  If the peer's feature mask is known,
 * send a Connection Request (or start AMP physical-link creation when
 * the channel prefers AMP) once security allows; if the feature
 * exchange has not started yet, issue an Information Request first and
 * arm its timeout. */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight but not finished: the
		 * connection attempt resumes when the response lands. */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			if (l2cap_pi(sk)->amp_pref == BT_AMP_POLICY_PREFER_AMP)
				amp_create_physical(l2cap_pi(sk)->conn, sk);
			else
				l2cap_send_conn_req(sk);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
760
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300761static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
762{
763 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -0300764 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300765 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
766
767 switch (mode) {
768 case L2CAP_MODE_ERTM:
769 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
770 case L2CAP_MODE_STREAMING:
771 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
772 default:
773 return 0x00;
774 }
775}
776
/* Send an L2CAP Disconnect Request for this channel and tear down its
 * transmit state.
 *
 * Purges the transmit queue (plus the SREJ queue and all ERTM delayed work
 * when in ERTM mode) before the request goes out, then moves the socket to
 * BT_DISCONN and records @err in sk_err so blocked callers see the cause.
 *
 * @conn: the L2CAP connection (may be NULL, in which case nothing is done)
 * @sk:   the channel socket, expected locked by the caller
 * @err:  errno value to report to the socket
 */
void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
{
	struct l2cap_disconn_req req;

	if (!conn)
		return;

	/* No point transmitting queued data on a dying channel */
	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		skb_queue_purge(SREJ_QUEUE(sk));

		/* Stop all ERTM timers so no retransmission fires after
		 * the disconnect goes out */
		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}

	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
	l2cap_send_cmd(conn, l2cap_get_ident(conn),
			L2CAP_DISCONN_REQ, sizeof(req), &req);

	sk->sk_state = BT_DISCONN;
	sk->sk_err = err;
}
802
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200804static void l2cap_conn_start(struct l2cap_conn *conn)
805{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700806 struct l2cap_chan_list *l = &conn->chan_list;
807 struct sock_del_list del, *tmp1, *tmp2;
808 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200809
810 BT_DBG("conn %p", conn);
811
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700812 INIT_LIST_HEAD(&del.list);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200813
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700814 read_lock(&l->lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300815
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700816 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200817 bh_lock_sock(sk);
818
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700819 if (sk->sk_type != SOCK_SEQPACKET &&
820 sk->sk_type != SOCK_STREAM) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200821 bh_unlock_sock(sk);
822 continue;
823 }
824
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700825 if (sk->sk_state == BT_CONNECT) {
826 if (!l2cap_check_security(sk) ||
827 !__l2cap_no_conn_pending(sk)) {
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300828 bh_unlock_sock(sk);
829 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +0200830 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300831
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700832 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
833 conn->feat_mask)
834 && l2cap_pi(sk)->conf_state &
835 L2CAP_CONF_STATE2_DEVICE) {
836 tmp1 = kzalloc(sizeof(struct sock_del_list),
837 GFP_ATOMIC);
838 tmp1->sk = sk;
839 list_add_tail(&tmp1->list, &del.list);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300840 bh_unlock_sock(sk);
841 continue;
842 }
843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700844 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846 if (l2cap_pi(sk)->amp_pref == BT_AMP_POLICY_PREFER_AMP)
847 amp_create_physical(l2cap_pi(sk)->conn, sk);
848 else
849 l2cap_send_conn_req(sk);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300850
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851 } else if (sk->sk_state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200852 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300853 char buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
855 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200856
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700857 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100858 if (bt_sk(sk)->defer_setup) {
859 struct sock *parent = bt_sk(sk)->parent;
860 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
861 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Nick Pellyf86e4b02010-04-08 16:23:32 -0700862 if (parent)
863 parent->sk_data_ready(parent, 0);
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100864
865 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700866 sk->sk_state = BT_CONFIG;
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100867 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
868 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
869 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200870 } else {
871 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
872 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
873 }
874
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700875 if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
876 l2cap_pi(sk)->amp_id) {
877 amp_accept_physical(conn,
878 l2cap_pi(sk)->amp_id, sk);
879 bh_unlock_sock(sk);
880 continue;
881 }
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300882
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700883 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
884 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
885
886 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300887 rsp.result != L2CAP_CR_SUCCESS) {
888 bh_unlock_sock(sk);
889 continue;
890 }
891
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700892 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300893 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700894 l2cap_build_conf_req(sk, buf), buf);
895 l2cap_pi(sk)->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200896 }
897
898 bh_unlock_sock(sk);
899 }
900
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700901 read_unlock(&l->lock);
902
903 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
904 bh_lock_sock(tmp1->sk);
905 __l2cap_sock_close(tmp1->sk, ECONNRESET);
906 bh_unlock_sock(tmp1->sk);
907 list_del(&tmp1->list);
908 kfree(tmp1);
909 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200910}
911
Ville Tervob62f3282011-02-10 22:38:50 -0300912/* Find socket with cid and source bdaddr.
913 * Returns closest match, locked.
914 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700915static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
Ville Tervob62f3282011-02-10 22:38:50 -0300916{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700917 struct sock *sk = NULL, *sk1 = NULL;
918 struct hlist_node *node;
Ville Tervob62f3282011-02-10 22:38:50 -0300919
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700920 read_lock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300921
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700922 sk_for_each(sk, node, &l2cap_sk_list.head) {
923 if (state && sk->sk_state != state)
Ville Tervob62f3282011-02-10 22:38:50 -0300924 continue;
925
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700926 if (l2cap_pi(sk)->scid == cid) {
Ville Tervob62f3282011-02-10 22:38:50 -0300927 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700928 if (!bacmp(&bt_sk(sk)->src, src))
929 break;
Ville Tervob62f3282011-02-10 22:38:50 -0300930
931 /* Closest match */
932 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700933 sk1 = sk;
Ville Tervob62f3282011-02-10 22:38:50 -0300934 }
935 }
Gustavo F. Padovan280f2942011-04-13 19:01:22 -0300936
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700937 read_unlock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300938
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700939 return node ? sk : sk1;
Ville Tervob62f3282011-02-10 22:38:50 -0300940}
941
942static void l2cap_le_conn_ready(struct l2cap_conn *conn)
943{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700944 struct l2cap_chan_list *list = &conn->chan_list;
945 struct sock *parent, *uninitialized_var(sk);
Ville Tervob62f3282011-02-10 22:38:50 -0300946
947 BT_DBG("");
948
949 /* Check if we have socket listening on cid */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700950 parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
Ville Tervob62f3282011-02-10 22:38:50 -0300951 conn->src);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700952 if (!parent)
Ville Tervob62f3282011-02-10 22:38:50 -0300953 return;
954
Gustavo F. Padovan62f3a2c2011-04-14 18:34:34 -0300955 bh_lock_sock(parent);
956
Ville Tervob62f3282011-02-10 22:38:50 -0300957 /* Check for backlog size */
958 if (sk_acceptq_is_full(parent)) {
959 BT_DBG("backlog full %d", parent->sk_ack_backlog);
960 goto clean;
961 }
962
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700963 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
964 if (!sk)
Ville Tervob62f3282011-02-10 22:38:50 -0300965 goto clean;
966
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700967 write_lock_bh(&list->lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300968
969 hci_conn_hold(conn->hcon);
970
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700971 l2cap_sock_init(sk, parent);
Ville Tervob62f3282011-02-10 22:38:50 -0300972 bacpy(&bt_sk(sk)->src, conn->src);
973 bacpy(&bt_sk(sk)->dst, conn->dst);
974
Gustavo F. Padovand1010242011-03-25 00:39:48 -0300975 bt_accept_enqueue(parent, sk);
976
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700977 __l2cap_chan_add(conn, sk);
Gustavo F. Padovan48454072011-03-25 00:22:30 -0300978
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700979 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
Ville Tervob62f3282011-02-10 22:38:50 -0300980
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700981 sk->sk_state = BT_CONNECTED;
Ville Tervob62f3282011-02-10 22:38:50 -0300982 parent->sk_data_ready(parent, 0);
983
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700984 write_unlock_bh(&list->lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300985
986clean:
987 bh_unlock_sock(parent);
988}
989
/* Called when the underlying link comes up: move every channel on this
 * connection forward.
 *
 * For an incoming LE link, first try to accept on the LE data channel.
 * Then, per channel: LE data channels become ready once SMP security is
 * satisfied; connectionless (non-SEQPACKET/STREAM) sockets go straight to
 * BT_CONNECTED; sockets waiting in BT_CONNECT start the L2CAP connect
 * sequence.  The channel list is walked under its read lock with each
 * socket bh-locked in turn.
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
			/* LE data is gated on SMP security completing */
			if (smp_conn_security(conn, l2cap_pi(sk)->sec_level))
				l2cap_chan_ready(sk);

		} else if (sk->sk_type != SOCK_SEQPACKET &&
				sk->sk_type != SOCK_STREAM) {
			/* Connectionless channels need no L2CAP handshake */
			l2cap_sock_clear_timer(sk);
			sk->sk_state = BT_CONNECTED;
			sk->sk_state_change(sk);
		} else if (sk->sk_state == BT_CONNECT)
			l2cap_do_start(sk);

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);
}
1022
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001023/* Notify sockets that we cannot guaranty reliability anymore */
1024static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1025{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001026 struct l2cap_chan_list *l = &conn->chan_list;
1027 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001028
1029 BT_DBG("conn %p", conn);
1030
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001031 read_lock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001032
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001033 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1034 if (l2cap_pi(sk)->force_reliable)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001035 sk->sk_err = err;
1036 }
1037
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001038 read_unlock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001039}
1040
1041static void l2cap_info_timeout(unsigned long arg)
1042{
1043 struct l2cap_conn *conn = (void *) arg;
1044
Marcel Holtmann984947d2009-02-06 23:35:19 +01001045 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001046 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001047
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001048 l2cap_conn_start(conn);
1049}
1050
/* LE security procedure timed out: tear down the whole connection and
 * report ETIMEDOUT to every channel on it.
 */
static void security_timeout(unsigned long arg)
{
	struct l2cap_conn *conn = (void *) arg;

	l2cap_conn_del(conn->hcon, ETIMEDOUT);
}
1057
/* Create (or return the existing) L2CAP connection object for an ACL/LE
 * link.
 *
 * Returns the already-attached conn if one exists, NULL if the link came
 * up with an error status or the atomic allocation fails.  Otherwise the
 * new conn is attached to the hci_conn, its MTU taken from the device's
 * LE or ACL MTU as appropriate, and the per-type timer (security timer for
 * LE, info timer for BR/EDR) is initialized.
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Already set up, or the link itself failed */
	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* LE links use the security timeout; BR/EDR waits for the
	 * Information Response instead */
	if (hcon->type == LE_LINK)
		setup_timer(&conn->security_timer, security_timeout,
						(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
						(unsigned long) conn);

	/* Default disconnect reason: 0x13 = remote user terminated */
	conn->disc_reason = 0x13;

	return conn;
}
1098
/* Tear down the L2CAP state associated with a dying hci_conn.
 *
 * @hcon may be either the BR/EDR link that owns the conn or an AMP link
 * some channels were moved to; only channels riding on @hcon are killed.
 * The conn object itself (pending rx_skb, info timer, the struct) is
 * freed only when @hcon is the owning link.  The channel walk saves the
 * next pointer before deleting, since l2cap_chan_del unlinks the socket
 * from the list.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	struct sock *next;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	if ((conn->hcon == hcon) && (conn->rx_skb))
		kfree_skb(conn->rx_skb);

	BT_DBG("conn->hcon %p", conn->hcon);

	/* Kill channels */
	for (sk = conn->chan_list.head; sk; ) {
		BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
		if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
			/* Grab next before chan_del unlinks this socket */
			next = l2cap_pi(sk)->next_c;
			bh_lock_sock(sk);
			l2cap_chan_del(sk, err);
			bh_unlock_sock(sk);
			l2cap_sock_kill(sk);
			sk = next;
		} else
			sk = l2cap_pi(sk)->next_c;
	}

	if (conn->hcon == hcon) {
		if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
			del_timer_sync(&conn->info_timer);

		hcon->l2cap_data = NULL;

		kfree(conn);
	}
}
1138
/* Add a channel socket to the connection's channel list, taking the list
 * write lock around the unlocked helper.
 */
static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	write_lock_bh(&l->lock);
	__l2cap_chan_add(conn, sk);
	write_unlock_bh(&l->lock);
}
1146
Linus Torvalds1da177e2005-04-16 15:20:36 -07001147/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001148
1149/* Find socket with psm and source bdaddr.
1150 * Returns closest match.
1151 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001152static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001154 struct sock *sk = NULL, *sk1 = NULL;
1155 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001156
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001157 read_lock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001158
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001159 sk_for_each(sk, node, &l2cap_sk_list.head) {
1160 if (state && sk->sk_state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161 continue;
1162
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001163 if (l2cap_pi(sk)->psm == psm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001164 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001165 if (!bacmp(&bt_sk(sk)->src, src))
1166 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001167
1168 /* Closest match */
1169 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001170 sk1 = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001171 }
1172 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001173
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001174 read_unlock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001176 return node ? sk : sk1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001177}
1178
/* Establish the outgoing side of an L2CAP channel.
 *
 * Resolves the route to the destination, then either piggybacks on an
 * existing ACL (fixed channels), or creates an ACL/LE link via hci_connect
 * and attaches an l2cap_conn to it.  The socket is linked into the
 * connection's channel list; fixed channels become BT_CONNECTED at once,
 * others enter BT_CONNECT and, if the link is already up, start the L2CAP
 * connect sequence immediately.
 *
 * Returns 0 on success or a negative errno (-EHOSTUNREACH when no route,
 * -ENOTCONN when a fixed channel has no live ACL, -ENOMEM, or whatever
 * hci_connect reported).  Called with the socket locked.
 */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	if (l2cap_pi(sk)->fixed_channel) {
		/* Fixed channels piggyback on existing ACL connections */
		hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
		if (!hcon || !hcon->l2cap_data) {
			err = -ENOTCONN;
			goto done;
		}

		conn = hcon->l2cap_data;
	} else {
		/* The LE data CID selects an LE link, anything else ACL */
		if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
			hcon = hci_connect(hdev, LE_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);
		else
			hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);

		if (IS_ERR(hcon)) {
			err = PTR_ERR(hcon);
			goto done;
		}

		conn = l2cap_conn_add(hcon, 0);
		if (!conn) {
			/* Release the reference hci_connect took */
			hci_conn_put(hcon);
			err = -ENOMEM;
			goto done;
		}
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk);

	BT_DBG("hcon->state %d", (int) hcon->state);

	if (l2cap_pi(sk)->fixed_channel) {
		/* No connect/config exchange needed for fixed channels */
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		sk->sk_state = BT_CONNECT;
		l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
		sk->sk_state_change(sk);

		if (hcon->state == BT_CONNECTED) {
			if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Connectionless: ready as soon as security
				 * allows */
				l2cap_sock_clear_timer(sk);
				if (l2cap_check_security(sk)) {
					sk->sk_state = BT_CONNECTED;
					sk->sk_state_change(sk);
				}
			} else
				l2cap_do_start(sk);
		}
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1265
/* Block until every transmitted ERTM frame has been acknowledged and the
 * local ERTM queue has drained, or until a signal/socket error interrupts.
 *
 * Sleeps in HZ/5 slices on the socket wait queue, releasing the socket
 * lock across each schedule_timeout so acknowledgements can be processed.
 * Returns 0 on success, a signal errno, or the pending socket error.
 * Called with the socket locked; returns with it locked.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
		atomic_read(&l2cap_pi(sk)->ertm_queued)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* schedule_timeout may have returned 0; rearm the slice */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the lock so the rx path can consume acks */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1297
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001298static void l2cap_ertm_tx_worker(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001299{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001300 struct l2cap_pinfo *pi =
1301 container_of(work, struct l2cap_pinfo, tx_work);
1302 struct sock *sk = (struct sock *)pi;
1303 BT_DBG("%p", pi);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001304
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001305 lock_sock(sk);
1306 l2cap_ertm_send(sk);
1307 release_sock(sk);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001308}
1309
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001310static void l2cap_skb_destructor(struct sk_buff *skb)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001311{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001312 struct sock *sk = skb->sk;
1313 int queued;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001314
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001315 queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
1316 if (queued < L2CAP_MIN_ERTM_QUEUED)
1317 queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001318}
1319
/* Hand one outgoing skb to the HCI layer on whichever link currently
 * carries this channel.
 *
 * If the channel has been moved to (or is stable on) an AMP controller,
 * send on the AMP logical channel — or drop the frame if the logical
 * channel is gone.  Otherwise send on the BR/EDR ACL, choosing
 * non-flushable packet boundaries when the controller supports it and the
 * socket is not flushable.  Consumes the skb in all paths.
 */
void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);

	if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
			pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
		BT_DBG("Sending on AMP connection %p %p",
			pi->ampcon, pi->ampchan);
		if (pi->ampchan)
			hci_send_acl(pi->ampcon, pi->ampchan, skb,
					ACL_COMPLETE);
		else
			/* Logical channel vanished mid-move: drop */
			kfree_skb(skb);
	} else {
		u16 flags;

		bt_cb(skb)->force_active = pi->force_active;
		BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);

		if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
				!l2cap_pi(sk)->flushable)
			flags = ACL_START_NO_FLUSH;
		else
			flags = ACL_START;

		hci_send_acl(pi->conn->hcon, NULL, skb, flags);
	}
}
1350
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001351int l2cap_ertm_send(struct sock *sk)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03001352{
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03001353 struct sk_buff *skb, *tx_skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001354 struct l2cap_pinfo *pi = l2cap_pi(sk);
1355 struct bt_l2cap_control *control;
1356 int sent = 0;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03001357
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001358 BT_DBG("sk %p", sk);
Gustavo F. Padovanf11d6762010-05-01 16:15:44 -03001359
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001360 if (sk->sk_state != BT_CONNECTED)
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -03001361 return -ENOTCONN;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001362
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001363 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
1364 return 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001365
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001366 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
1367 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
1368 return 0;
1369
1370 while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
1371 atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
1372 (pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {
1373
1374 skb = sk->sk_send_head;
1375
1376 bt_cb(skb)->retries = 1;
1377 control = &bt_cb(skb)->control;
1378
1379 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1380 control->final = 1;
1381 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1382 }
1383 control->reqseq = pi->buffer_seq;
1384 pi->last_acked_seq = pi->buffer_seq;
1385 control->txseq = pi->next_tx_seq;
1386
1387 if (pi->extended_control) {
1388 put_unaligned_le32(__pack_extended_control(control),
1389 skb->data + L2CAP_HDR_SIZE);
1390 } else {
1391 put_unaligned_le16(__pack_enhanced_control(control),
1392 skb->data + L2CAP_HDR_SIZE);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001393 }
1394
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001395 if (pi->fcs == L2CAP_FCS_CRC16)
1396 apply_fcs(skb);
1397
1398 /* Clone after data has been modified. Data is assumed to be
1399 read-only (for locking purposes) on cloned sk_buffs.
1400 */
Andrei Emeltchenkoe420aba2009-12-23 13:07:14 +02001401 tx_skb = skb_clone(skb, GFP_ATOMIC);
1402
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001403 tx_skb->sk = sk;
1404 tx_skb->destructor = l2cap_skb_destructor;
1405 atomic_inc(&pi->ertm_queued);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001406
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001407 l2cap_do_send(sk, tx_skb);
Gustavo F. Padovan95ffa972010-06-18 20:37:33 -03001408
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001409 BT_DBG("Sent txseq %d", (int)control->txseq);
Gustavo F. Padovane299c1c2011-06-10 21:28:49 -03001410
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001411 l2cap_ertm_start_retrans_timer(pi);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001412
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001413 pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
1414 pi->unacked_frames += 1;
1415 pi->frames_sent += 1;
1416 sent += 1;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001417
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001418 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1419 sk->sk_send_head = NULL;
1420 else
1421 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1422 }
1423
1424 BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
1425 (int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
1426 atomic_read(&pi->ertm_queued));
1427
1428 return sent;
1429}
1430
/* Transmit a batch of frames in streaming mode.
 *
 * Splices @skbs onto the channel's tx queue and drains the whole queue:
 * each frame gets txseq stamped into its control field (reqseq is always
 * 0 in streaming mode — no acknowledgements), the FCS applied when
 * configured, and is sent immediately.  No clones are kept: streaming
 * mode never retransmits.
 *
 * Returns 0 on success or -ENOTCONN; returns 0 without sending while an
 * AMP channel move is in progress (frames left for a later caller —
 * NOTE(review): the local `sent` counter is maintained but not returned,
 * matching the original code).  Called with the socket locked.
 */
int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p, skbs %p", sk, skbs);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));

	BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
	while (!skb_queue_empty(TX_QUEUE(sk))) {

		skb = skb_dequeue(TX_QUEUE(sk));

		BT_DBG("skb %p", skb);

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		BT_DBG("control %p", control);

		/* Streaming mode: no acks, reqseq is always zero */
		control->reqseq = 0;
		control->txseq = pi->next_tx_seq;

		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		l2cap_do_send(sk, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->frames_sent += 1;
		sent += 1;
	}

	BT_DBG("Sent %d", sent);

	return 0;
}
1488
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001489static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001490{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001491 while (len > 0) {
1492 if (iv->iov_len) {
1493 int copy = min_t(unsigned int, len, iv->iov_len);
1494 memcpy(kdata, iv->iov_base, copy);
1495 len -= copy;
1496 kdata += copy;
1497 iv->iov_base += copy;
1498 iv->iov_len -= copy;
1499 }
1500 iv++;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001501 }
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001502
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001503 return 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001504}
1505
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001506static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
1507 int len, int count, struct sk_buff *skb,
1508 int reseg)
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03001509{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001510 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001511 struct sk_buff **frag;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001512 struct sk_buff *final;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001513 int err, sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001515 BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
1516 msg, (int)len, (int)count, skb);
1517
1518 if (!conn)
1519 return -ENOTCONN;
1520
1521 /* When resegmenting, data is copied from kernel space */
1522 if (reseg) {
1523 err = memcpy_fromkvec(skb_put(skb, count),
1524 (struct kvec *) msg->msg_iov, count);
1525 } else {
1526 err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
1527 count);
1528 }
1529
1530 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001531 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001532
1533 sent += count;
1534 len -= count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001535 final = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536
1537 /* Continuation fragments (no L2CAP header) */
1538 frag = &skb_shinfo(skb)->frag_list;
1539 while (len) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001540 int skblen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541 count = min_t(unsigned int, conn->mtu, len);
1542
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001543 /* Add room for the FCS if it fits */
1544 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
1545 len + L2CAP_FCS_SIZE <= conn->mtu)
1546 skblen = count + L2CAP_FCS_SIZE;
1547 else
1548 skblen = count;
1549
1550 /* Don't use bt_skb_send_alloc() while resegmenting, since
1551 * it is not ok to block.
1552 */
1553 if (reseg) {
1554 *frag = bt_skb_alloc(skblen, GFP_ATOMIC);
1555 if (*frag)
1556 skb_set_owner_w(*frag, sk);
1557 } else {
1558 *frag = bt_skb_send_alloc(sk, skblen,
1559 msg->msg_flags & MSG_DONTWAIT, &err);
1560 }
1561
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562 if (!*frag)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001563 return -EFAULT;
1564
1565 /* When resegmenting, data is copied from kernel space */
1566 if (reseg) {
1567 err = memcpy_fromkvec(skb_put(*frag, count),
1568 (struct kvec *) msg->msg_iov,
1569 count);
1570 } else {
1571 err = memcpy_fromiovec(skb_put(*frag, count),
1572 msg->msg_iov, count);
1573 }
1574
1575 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001576 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577
1578 sent += count;
1579 len -= count;
1580
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001581 final = *frag;
1582
Linus Torvalds1da177e2005-04-16 15:20:36 -07001583 frag = &(*frag)->next;
1584 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001585
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001586 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
1587 if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
1588 if (reseg) {
1589 *frag = bt_skb_alloc(L2CAP_FCS_SIZE,
1590 GFP_ATOMIC);
1591 if (*frag)
1592 skb_set_owner_w(*frag, sk);
1593 } else {
1594 *frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
1595 msg->msg_flags & MSG_DONTWAIT,
1596 &err);
1597 }
1598
1599 if (!*frag)
1600 return -EFAULT;
1601
1602 final = *frag;
1603 }
1604
1605 skb_put(final, L2CAP_FCS_SIZE);
1606 }
1607
Linus Torvalds1da177e2005-04-16 15:20:36 -07001608 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001609}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001610
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001611struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001612{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001613 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001614 struct sk_buff *skb;
1615 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1616 struct l2cap_hdr *lh;
1617
1618 BT_DBG("sk %p len %d", sk, (int)len);
1619
1620 count = min_t(unsigned int, (conn->mtu - hlen), len);
1621 skb = bt_skb_send_alloc(sk, count + hlen,
1622 msg->msg_flags & MSG_DONTWAIT, &err);
1623 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001624 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001625
1626 /* Create L2CAP header */
1627 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001628 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001629 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001630 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001631
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001632 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001633 if (unlikely(err < 0)) {
1634 kfree_skb(skb);
1635 return ERR_PTR(err);
1636 }
1637 return skb;
1638}
1639
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001640struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001641{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001642 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001643 struct sk_buff *skb;
1644 int err, count, hlen = L2CAP_HDR_SIZE;
1645 struct l2cap_hdr *lh;
1646
1647 BT_DBG("sk %p len %d", sk, (int)len);
1648
1649 count = min_t(unsigned int, (conn->mtu - hlen), len);
1650 skb = bt_skb_send_alloc(sk, count + hlen,
1651 msg->msg_flags & MSG_DONTWAIT, &err);
1652 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001653 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001654
1655 /* Create L2CAP header */
1656 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001657 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001658 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1659
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001660 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001661 if (unlikely(err < 0)) {
1662 kfree_skb(skb);
1663 return ERR_PTR(err);
1664 }
1665 return skb;
1666}
1667
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001668struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
1669 struct msghdr *msg, size_t len,
1670 u16 sdulen, int reseg)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001671{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001672 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001673 int err, count, hlen;
1674 int reserve = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001675 struct l2cap_hdr *lh;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001676 u8 fcs = l2cap_pi(sk)->fcs;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001677
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 if (l2cap_pi(sk)->extended_control)
1679 hlen = L2CAP_EXTENDED_HDR_SIZE;
1680 else
1681 hlen = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03001682
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001683 if (sdulen)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001684 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001685
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001686 if (fcs == L2CAP_FCS_CRC16)
1687 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001688
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001689 BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
1690 sk, msg, (int)len, (int)sdulen, hlen);
1691
1692 count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
1693
1694 /* Allocate extra headroom for Qualcomm PAL. This is only
1695 * necessary in two places (here and when creating sframes)
1696 * because only unfragmented iframes and sframes are sent
1697 * using AMP controllers.
1698 */
1699 if (l2cap_pi(sk)->ampcon &&
1700 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1701 reserve = BT_SKB_RESERVE_80211;
1702
1703 /* Don't use bt_skb_send_alloc() while resegmenting, since
1704 * it is not ok to block.
1705 */
1706 if (reseg) {
1707 skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
1708 if (skb)
1709 skb_set_owner_w(skb, sk);
1710 } else {
1711 skb = bt_skb_send_alloc(sk, count + hlen + reserve,
1712 msg->msg_flags & MSG_DONTWAIT, &err);
1713 }
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001714 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001715 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001716
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001717 if (reserve)
1718 skb_reserve(skb, reserve);
1719
1720 bt_cb(skb)->control.fcs = fcs;
1721
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001722 /* Create L2CAP header */
1723 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001724 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1725 lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001726
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001727 /* Control header is populated later */
1728 if (l2cap_pi(sk)->extended_control)
1729 put_unaligned_le32(0, skb_put(skb, 4));
1730 else
1731 put_unaligned_le16(0, skb_put(skb, 2));
1732
1733 if (sdulen)
1734 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1735
1736 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001737 if (unlikely(err < 0)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001738 BT_DBG("err %d", err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001739 kfree_skb(skb);
1740 return ERR_PTR(err);
1741 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001742
1743 bt_cb(skb)->retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001744 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001745}
1746
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001747static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001748{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001749 struct l2cap_pinfo *pi;
1750 struct sk_buff *acked_skb;
1751 u16 ackseq;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001752
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001753 BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001754
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001755 pi = l2cap_pi(sk);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001756
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001757 if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
1758 return;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001759
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001760 BT_DBG("expected_ack_seq %d, unacked_frames %d",
1761 (int) pi->expected_ack_seq, (int) pi->unacked_frames);
1762
1763 for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
1764 ackseq = __next_seq(ackseq, pi)) {
1765
1766 acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
1767 if (acked_skb) {
1768 skb_unlink(acked_skb, TX_QUEUE(sk));
1769 kfree_skb(acked_skb);
1770 pi->unacked_frames--;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001771 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001772 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001773
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001774 pi->expected_ack_seq = reqseq;
1775
1776 if (pi->unacked_frames == 0)
1777 l2cap_ertm_stop_retrans_timer(pi);
1778
1779 BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001780}
1781
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001782static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001783{
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001784 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001785 int len;
1786 int reserve = 0;
1787 struct l2cap_hdr *lh;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001788
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001789 if (l2cap_pi(sk)->extended_control)
1790 len = L2CAP_EXTENDED_HDR_SIZE;
1791 else
1792 len = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001793
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001794 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1795 len += L2CAP_FCS_SIZE;
1796
1797 /* Allocate extra headroom for Qualcomm PAL */
1798 if (l2cap_pi(sk)->ampcon &&
1799 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1800 reserve = BT_SKB_RESERVE_80211;
1801
1802 skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);
1803
1804 if (!skb)
1805 return ERR_PTR(-ENOMEM);
1806
1807 if (reserve)
1808 skb_reserve(skb, reserve);
1809
1810 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1811 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1812 lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);
1813
1814 if (l2cap_pi(sk)->extended_control)
1815 put_unaligned_le32(control, skb_put(skb, 4));
1816 else
1817 put_unaligned_le16(control, skb_put(skb, 2));
1818
1819 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1820 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1821 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001822 }
1823
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001824 return skb;
1825}
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001826
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001827static void l2cap_ertm_send_sframe(struct sock *sk,
1828 struct bt_l2cap_control *control)
1829{
1830 struct l2cap_pinfo *pi;
1831 struct sk_buff *skb;
1832 u32 control_field;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001833
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001834 BT_DBG("sk %p, control %p", sk, control);
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001835
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001836 if (control->frame_type != 's')
1837 return;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001838
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001839 pi = l2cap_pi(sk);
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001840
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001841 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
1842 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
1843 pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
1844 BT_DBG("AMP error - attempted S-Frame send during AMP move");
1845 return;
1846 }
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001847
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001848 if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
1849 control->final = 1;
1850 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1851 }
1852
1853 if (control->super == L2CAP_SFRAME_RR)
1854 pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
1855 else if (control->super == L2CAP_SFRAME_RNR)
1856 pi->conn_state |= L2CAP_CONN_SENT_RNR;
1857
1858 if (control->super != L2CAP_SFRAME_SREJ) {
1859 pi->last_acked_seq = control->reqseq;
1860 l2cap_ertm_stop_ack_timer(pi);
1861 }
1862
1863 BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
1864 (int) control->final, (int) control->poll,
1865 (int) control->super);
1866
1867 if (pi->extended_control)
1868 control_field = __pack_extended_control(control);
1869 else
1870 control_field = __pack_enhanced_control(control);
1871
1872 skb = l2cap_create_sframe_pdu(sk, control_field);
1873 if (!IS_ERR(skb))
1874 l2cap_do_send(sk, skb);
1875}
1876
/* Acknowledge received I-frames. If we are locally busy (and still in
 * the normal RECV state), send an RNR immediately. Otherwise try to
 * piggy-back the ack on pending I-frames; if none went out and the
 * number of unacked received frames has reached 3/4 of the tx window,
 * send an explicit RR now, else (re)arm the ack timer to send one
 * later.
 */
static void l2cap_ertm_send_ack(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control control;
	u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
	int threshold;

	BT_DBG("sk %p", sk);
	BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
		(int)pi->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';

	if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
		pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
		l2cap_ertm_stop_ack_timer(pi);
		control.super = L2CAP_SFRAME_RNR;
		control.reqseq = pi->buffer_seq;
		l2cap_ertm_send_sframe(sk, &control);
	} else {
		if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
			l2cap_ertm_send(sk);
			/* If any i-frames were sent, they included an ack */
			if (pi->buffer_seq == pi->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = pi->tx_win;
		threshold += threshold << 1;	/* threshold = 3 * tx_win */
		threshold >>= 2;		/* threshold = 3/4 * tx_win */

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
			threshold);

		if (frames_to_ack >= threshold) {
			l2cap_ertm_stop_ack_timer(pi);
			control.super = L2CAP_SFRAME_RR;
			control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &control);
			frames_to_ack = 0;
		}

		/* Still frames to ack: defer via the ack timer */
		if (frames_to_ack)
			l2cap_ertm_start_ack_timer(pi);
	}
}
1927
1928static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
1929{
1930 struct l2cap_pinfo *pi;
1931 struct bt_l2cap_control control;
1932
1933 BT_DBG("sk %p, poll %d", sk, (int) poll);
1934
1935 pi = l2cap_pi(sk);
1936
1937 memset(&control, 0, sizeof(control));
1938 control.frame_type = 's';
1939 control.poll = poll;
1940
1941 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
1942 control.super = L2CAP_SFRAME_RNR;
1943 else
1944 control.super = L2CAP_SFRAME_RR;
1945
1946 control.reqseq = pi->buffer_seq;
1947 l2cap_ertm_send_sframe(sk, &control);
1948}
1949
/* Respond to a poll: a Final bit must be sent back. Prefer to carry it
 * on an RNR (if locally busy) or on a pending I-frame; if neither goes
 * out with the F-bit, fall back to an explicit RR at the end. Also
 * clears the remote-busy condition and restarts the retransmission
 * timer if frames are still unacked.
 */
static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.final = 1;
	control.reqseq = pi->buffer_seq;
	/* Mark that a Final bit is owed; whichever frame goes out first
	 * (RNR, I-frame, or the fallback RR) consumes it.
	 */
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control.super = L2CAP_SFRAME_RNR;
		l2cap_ertm_send_sframe(sk, &control);
	}

	if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
		(pi->unacked_frames > 0))
		l2cap_ertm_start_retrans_timer(pi);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Send pending iframes */
	l2cap_ertm_send(sk);

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SFRAME_RR;
		l2cap_ertm_send_sframe(sk, &control);
	}
}
1987
1988static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
1989{
1990 struct bt_l2cap_control control;
1991 struct l2cap_pinfo *pi;
1992 u16 seq;
1993
1994 BT_DBG("sk %p, txseq %d", sk, (int)txseq);
1995
1996 pi = l2cap_pi(sk);
1997 memset(&control, 0, sizeof(control));
1998 control.frame_type = 's';
1999 control.super = L2CAP_SFRAME_SREJ;
2000
2001 for (seq = pi->expected_tx_seq; seq != txseq;
2002 seq = __next_seq(seq, pi)) {
2003 if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
2004 control.reqseq = seq;
2005 l2cap_ertm_send_sframe(sk, &control);
2006 l2cap_seq_list_append(&pi->srej_list, seq);
2007 }
2008 }
2009
2010 pi->expected_tx_seq = __next_seq(txseq, pi);
2011}
2012
2013static void l2cap_ertm_send_srej_tail(struct sock *sk)
2014{
2015 struct bt_l2cap_control control;
2016 struct l2cap_pinfo *pi;
2017
2018 BT_DBG("sk %p", sk);
2019
2020 pi = l2cap_pi(sk);
2021
2022 if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2023 return;
2024
2025 memset(&control, 0, sizeof(control));
2026 control.frame_type = 's';
2027 control.super = L2CAP_SFRAME_SREJ;
2028 control.reqseq = pi->srej_list.tail;
2029 l2cap_ertm_send_sframe(sk, &control);
2030}
2031
2032static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
2033{
2034 struct bt_l2cap_control control;
2035 struct l2cap_pinfo *pi;
2036 u16 initial_head;
2037 u16 seq;
2038
2039 BT_DBG("sk %p, txseq %d", sk, (int) txseq);
2040
2041 pi = l2cap_pi(sk);
2042 memset(&control, 0, sizeof(control));
2043 control.frame_type = 's';
2044 control.super = L2CAP_SFRAME_SREJ;
2045
2046 /* Capture initial list head to allow only one pass through the list. */
2047 initial_head = pi->srej_list.head;
2048
2049 do {
2050 seq = l2cap_seq_list_pop(&pi->srej_list);
2051 if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
2052 break;
2053
2054 control.reqseq = seq;
2055 l2cap_ertm_send_sframe(sk, &control);
2056 l2cap_seq_list_append(&pi->srej_list, seq);
2057 } while (pi->srej_list.head != initial_head);
2058}
2059
2060static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
2061{
2062 struct l2cap_pinfo *pi = l2cap_pi(sk);
2063 BT_DBG("sk %p", sk);
2064
2065 pi->expected_tx_seq = pi->buffer_seq;
2066 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
2067 skb_queue_purge(SREJ_QUEUE(sk));
2068 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2069}
2070
/* ERTM transmit-side state machine: handler for the XMIT (normal
 * transmission) state. Dispatches on @event; @control carries the
 * received frame's fields for RECV_* events, @skbs carries new PDUs
 * for DATA_REQUEST. Always returns 0 in the current implementation.
 */
static int l2cap_ertm_tx_state_xmit(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		/* Queue new PDUs and transmit as the window allows */
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		l2cap_ertm_send(sk);
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* Local busy + RECV state makes this send an RNR */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		/* If an AMP channel move was blocked on local busy,
		 * resume it now that busy has cleared.
		 */
		if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
			if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
						pi->amp_move_cmd_ident,
						pi->dcid,
						L2CAP_MOVE_CHAN_SUCCESS);
			}
			break;
		}

		/* If we previously told the peer we were busy (RNR), poll
		 * with an RR and wait for the Final bit in WAIT_F state.
		 */
		if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
			(pi->conn_state & L2CAP_CONN_SENT_RNR)) {
			struct bt_l2cap_control local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		/* Poll the peer and enter WAIT_F until the Final bit returns */
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		l2cap_ertm_stop_ack_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2169
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002170static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
2171 struct bt_l2cap_control *control,
2172 struct sk_buff_head *skbs, u8 event)
2173{
2174 struct l2cap_pinfo *pi;
2175 int err = 0;
2176
2177 BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
2178 (int)event);
2179 pi = l2cap_pi(sk);
2180
2181 switch (event) {
2182 case L2CAP_ERTM_EVENT_DATA_REQUEST:
2183 if (sk->sk_send_head == NULL)
2184 sk->sk_send_head = skb_peek(skbs);
2185 /* Queue data, but don't send. */
2186 skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
2187 break;
2188 case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
2189 BT_DBG("Enter LOCAL_BUSY");
2190 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2191
2192 if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
2193 /* The SREJ_SENT state must be aborted if we are to
2194 * enter the LOCAL_BUSY state.
2195 */
2196 l2cap_ertm_abort_rx_srej_sent(sk);
2197 }
2198
2199 l2cap_ertm_send_ack(sk);
2200
2201 break;
2202 case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
2203 BT_DBG("Exit LOCAL_BUSY");
2204 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2205
2206 if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
2207 struct bt_l2cap_control local_control;
2208 memset(&local_control, 0, sizeof(local_control));
2209 local_control.frame_type = 's';
2210 local_control.super = L2CAP_SFRAME_RR;
2211 local_control.poll = 1;
2212 local_control.reqseq = pi->buffer_seq;
2213 l2cap_ertm_send_sframe(sk, &local_control);
2214
2215 pi->retry_count = 1;
2216 l2cap_ertm_start_monitor_timer(pi);
2217 pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
2218 }
2219 break;
2220 case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
2221 l2cap_ertm_process_reqseq(sk, control->reqseq);
2222
2223 /* Fall through */
2224
2225 case L2CAP_ERTM_EVENT_RECV_FBIT:
2226 if (control && control->final) {
2227 l2cap_ertm_stop_monitor_timer(pi);
2228 if (pi->unacked_frames > 0)
2229 l2cap_ertm_start_retrans_timer(pi);
2230 pi->retry_count = 0;
2231 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2232 BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
2233 }
2234 break;
2235 case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
2236 /* Ignore */
2237 break;
2238 case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
2239 if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
2240 l2cap_ertm_send_rr_or_rnr(sk, 1);
2241 l2cap_ertm_start_monitor_timer(pi);
2242 pi->retry_count += 1;
2243 } else
2244 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
2245 break;
2246 default:
2247 break;
2248 }
2249
2250 return err;
2251}
2252
2253int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
2254 struct sk_buff_head *skbs, u8 event)
2255{
2256 struct l2cap_pinfo *pi;
2257 int err = 0;
2258
2259 BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
2260 sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
2261
2262 pi = l2cap_pi(sk);
2263
2264 switch (pi->tx_state) {
2265 case L2CAP_ERTM_TX_STATE_XMIT:
2266 err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
2267 break;
2268 case L2CAP_ERTM_TX_STATE_WAIT_F:
2269 err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
2270 break;
2271 default:
2272 /* Ignore event */
2273 break;
2274 }
2275
2276 return err;
2277}
2278
/* Segment an SDU of @len bytes from @msg into one or more ERTM I-frame
 * PDUs appended to @seg_queue, tagging each with the appropriate SAR
 * value (unsegmented, start, continue, end). @reseg selects the
 * non-blocking kernel-space copy path. Returns 0 on success; on
 * failure the queue is purged and the PDU-creation error is returned.
 */
int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
			struct msghdr *msg, size_t len, int reseg)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used. The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = l2cap_pi(sk)->conn->mtu;

	/* Constrain BR/EDR PDU size to fit within the largest radio packet */
	if (!l2cap_pi(sk)->ampcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);

	if (len <= pdu_len) {
		/* Whole SDU fits in one PDU; no SDU length field needed */
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		/* First PDU of a segmented SDU carries the total SDU length */
		sar = L2CAP_SAR_START;
		sdu_len = len;
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len) {
		skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);

		BT_DBG("iframe skb %p", skb);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU has the SDU length field;
			 * later PDUs gain that space back for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2347
2348static inline int is_initial_frame(u8 sar)
2349{
2350 return (sar == L2CAP_SAR_UNSEGMENTED ||
2351 sar == L2CAP_SAR_START);
2352}
2353
2354static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
2355 size_t veclen)
2356{
2357 struct sk_buff *frag_iter;
2358
2359 BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
2360
2361 if (iv->iov_len + skb->len > veclen)
2362 return -ENOMEM;
2363
2364 memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
2365 iv->iov_len += skb->len;
2366
2367 skb_walk_frags(skb, frag_iter) {
2368 if (iv->iov_len + skb->len > veclen)
2369 return -ENOMEM;
2370
2371 BT_DBG("Copying %d bytes", (int)frag_iter->len);
2372 memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
2373 frag_iter->len);
2374 iv->iov_len += frag_iter->len;
2375 }
2376
2377 return 0;
2378}
2379
/* Re-segment every SDU on @queue to match the channel's current PDU
 * parameters (used after an AMP channel move changes MTU/header sizes).
 *
 * Each SDU is reassembled from its PDUs into a flat buffer, then
 * re-segmented with l2cap_segment_sdu() and spliced back onto @queue.
 * On any error the queue is purged and the error is returned; returns
 * 0 on success (or when the queue is empty).
 *
 * Caller must hold the socket lock.
 */
int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
{
	void *buf;
	int buflen;
	int err = 0;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iv;
	struct sk_buff_head old_frames;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (skb_queue_empty(queue))
		return 0;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = (struct iovec *) &iv;

	/* Reassembly buffer must fit the largest SDU plus its FCS */
	buflen = pi->omtu + L2CAP_FCS_SIZE;
	buf = kzalloc(buflen, GFP_TEMPORARY);

	if (!buf) {
		BT_DBG("Could not allocate resegmentation buffer");
		return -ENOMEM;
	}

	/* Move current frames off the original queue */
	__skb_queue_head_init(&old_frames);
	skb_queue_splice_tail_init(queue, &old_frames);

	while (!skb_queue_empty(&old_frames)) {
		struct sk_buff_head current_sdu;
		u8 original_sar;

		/* Reassemble each SDU from one or more PDUs */

		iv.iov_base = buf;
		iv.iov_len = 0;

		skb = skb_peek(&old_frames);
		original_sar = bt_cb(skb)->control.sar;

		__skb_unlink(skb, &old_frames);

		/* Append data to SDU */
		if (pi->extended_control)
			skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
		else
			skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

		/* Start frames carry an extra SDU-length field */
		if (original_sar == L2CAP_SAR_START)
			skb_pull(skb, L2CAP_SDULEN_SIZE);

		err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

		/* Strip the trailing FCS from the reassembled data */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
			iv.iov_len -= L2CAP_FCS_SIZE;

		/* Free skb */
		kfree_skb(skb);

		if (err)
			break;

		/* Pull in continuation/end PDUs of the same SDU */
		while (!skb_queue_empty(&old_frames) && !err) {
			/* Check next frame */
			skb = skb_peek(&old_frames);

			if (is_initial_frame(bt_cb(skb)->control.sar))
				break;

			__skb_unlink(skb, &old_frames);

			/* Append data to SDU */
			if (pi->extended_control)
				skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
			else
				skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

			if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
				skb_pull(skb, L2CAP_SDULEN_SIZE);

			err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

			if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
				iv.iov_len -= L2CAP_FCS_SIZE;

			/* Free skb */
			kfree_skb(skb);
		}

		if (err)
			break;

		/* Segment data */

		__skb_queue_head_init(&current_sdu);

		/* skbs for the SDU were just freed, but the
		 * resegmenting process could produce more, smaller
		 * skbs due to smaller PDUs and reduced HCI MTU. The
		 * overhead from the sk_buff structs could put us over
		 * the sk_sndbuf limit.
		 *
		 * Since this code is running in response to a
		 * received poll/final packet, it cannot block.
		 * Therefore, memory allocation needs to be allowed by
		 * falling back to bt_skb_alloc() (with
		 * skb_set_owner_w() to maintain sk_wmem_alloc
		 * correctly).
		 */
		/* NOTE(review): msg_iovlen is normally an iovec count,
		 * but here it is set to the byte length and passed as
		 * the "len" argument below — confirm l2cap_segment_sdu()
		 * expects this convention.
		 */
		msg.msg_iovlen = iv.iov_len;
		err = l2cap_segment_sdu(sk, &current_sdu, &msg,
					msg.msg_iovlen, 1);

		if (err || skb_queue_empty(&current_sdu)) {
			BT_DBG("Error %d resegmenting data for socket %p",
				err, sk);
			__skb_queue_purge(&current_sdu);
			break;
		}

		/* Fix up first PDU SAR bits */
		if (!is_initial_frame(original_sar)) {
			BT_DBG("Changing SAR bits, %d PDUs",
				skb_queue_len(&current_sdu));
			skb = skb_peek(&current_sdu);

			if (skb_queue_len(&current_sdu) == 1) {
				/* Change SAR from 'unsegmented' to 'end' */
				bt_cb(skb)->control.sar = L2CAP_SAR_END;
			} else {
				struct l2cap_hdr *lh;
				size_t hdrlen;

				/* Change SAR from 'start' to 'continue' */
				bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;

				/* Start frames contain 2 bytes for
				 * sdulen and continue frames don't.
				 * Must rewrite header to eliminate
				 * sdulen and then adjust l2cap frame
				 * length.
				 */
				if (pi->extended_control)
					hdrlen = L2CAP_EXTENDED_HDR_SIZE;
				else
					hdrlen = L2CAP_ENHANCED_HDR_SIZE;

				memmove(skb->data + L2CAP_SDULEN_SIZE,
					skb->data, hdrlen);
				skb_pull(skb, L2CAP_SDULEN_SIZE);
				lh = (struct l2cap_hdr *)skb->data;
				lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
						L2CAP_SDULEN_SIZE);
			}
		}

		/* Add to queue */
		skb_queue_splice_tail(&current_sdu, queue);
	}

	__skb_queue_purge(&old_frames);
	if (err)
		__skb_queue_purge(queue);

	kfree(buf);

	BT_DBG("Queue resegmented, err=%d", err);
	return err;
}
2552
2553static void l2cap_resegment_worker(struct work_struct *work)
2554{
2555 int err = 0;
2556 struct l2cap_resegment_work *seg_work =
2557 container_of(work, struct l2cap_resegment_work, work);
2558 struct sock *sk = seg_work->sk;
2559
2560 kfree(seg_work);
2561
2562 BT_DBG("sk %p", sk);
2563 lock_sock(sk);
2564
2565 if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
2566 release_sock(sk);
2567 return;
2568 }
2569
2570 err = l2cap_resegment_queue(sk, TX_QUEUE(sk));
2571
2572 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
2573
2574 if (skb_queue_empty(TX_QUEUE(sk)))
2575 sk->sk_send_head = NULL;
2576 else
2577 sk->sk_send_head = skb_peek(TX_QUEUE(sk));
2578
2579 if (err)
2580 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
2581 else
2582 l2cap_ertm_send(sk);
2583
2584 release_sock(sk);
2585}
2586
/* Schedule deferred resegmentation of the channel's TX queue after an
 * AMP move.  Returns 0 on success (or when nothing is queued), -ENOMEM
 * if the work item cannot be allocated or queued.
 */
static int l2cap_setup_resegment(struct sock *sk)
{
	struct l2cap_resegment_work *seg_work;

	BT_DBG("sk %p", sk);

	if (skb_queue_empty(TX_QUEUE(sk)))
		return 0;

	/* GFP_ATOMIC: may run in a context that cannot sleep */
	seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
	if (!seg_work)
		return -ENOMEM;

	INIT_WORK(&seg_work->work, l2cap_resegment_worker);
	seg_work->sk = sk;

	if (!queue_work(_l2cap_wq, &seg_work->work)) {
		kfree(seg_work);
		return -ENOMEM;
	}

	/* The worker only acts while the state remains RESEGMENT */
	l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;

	return 0;
}
2612
2613static inline int l2cap_rmem_available(struct sock *sk)
2614{
2615 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2616 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2617 return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
2618}
2619
2620static inline int l2cap_rmem_full(struct sock *sk)
2621{
2622 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2623 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2624 return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
2625}
2626
2627void l2cap_amp_move_init(struct sock *sk)
2628{
2629 BT_DBG("sk %p", sk);
2630
2631 if (!l2cap_pi(sk)->conn)
2632 return;
2633
2634 if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
2635 return;
2636
2637 if (l2cap_pi(sk)->amp_id == 0) {
2638 if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
2639 return;
2640 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2641 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
2642 amp_create_physical(l2cap_pi(sk)->conn, sk);
2643 } else {
2644 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2645 l2cap_pi(sk)->amp_move_state =
2646 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
2647 l2cap_pi(sk)->amp_move_id = 0;
2648 l2cap_amp_move_setup(sk);
2649 l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
2650 l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
2651 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
2652 }
2653}
2654
2655static void l2cap_chan_ready(struct sock *sk)
2656{
2657 struct sock *parent = bt_sk(sk)->parent;
2658
2659 BT_DBG("sk %p, parent %p", sk, parent);
2660
2661 l2cap_pi(sk)->conf_state = 0;
2662 l2cap_sock_clear_timer(sk);
2663
2664 if (!parent) {
2665 /* Outgoing channel.
2666 * Wake up socket sleeping on connect.
2667 */
2668 sk->sk_state = BT_CONNECTED;
2669 sk->sk_state_change(sk);
2670 } else {
2671 /* Incoming channel.
2672 * Wake up socket sleeping on accept.
2673 */
2674 parent->sk_data_ready(parent, 0);
2675 }
2676}
2677
Linus Torvalds1da177e2005-04-16 15:20:36 -07002678/* Copy frame to all raw sockets on that connection */
2679static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2680{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002681 struct l2cap_chan_list *l = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002682 struct sk_buff *nskb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002683 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002684
2685 BT_DBG("conn %p", conn);
2686
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002687 read_lock(&l->lock);
2688 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2689 if (sk->sk_type != SOCK_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002690 continue;
2691
2692 /* Don't send frame to the socket it came from */
2693 if (skb->sk == sk)
2694 continue;
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03002695 nskb = skb_clone(skb, GFP_ATOMIC);
2696 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002697 continue;
2698
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002699 if (sock_queue_rcv_skb(sk, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002700 kfree_skb(nskb);
2701 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002702 read_unlock(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002703}
2704
2705/* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command PDU.
 *
 * Allocates an skb with the L2CAP header, command header and up to
 * @dlen bytes of @data.  If the PDU exceeds the controller's ACL MTU,
 * the remainder is chained as header-less fragments on frag_list.
 * Returns NULL on allocation failure.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;
	unsigned int mtu = conn->hcon->hdev->acl_mtu;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	/* First fragment is capped at the ACL MTU */
	count = min_t(unsigned int, mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	/* LE links use a dedicated signalling channel ID */
	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* kfree_skb releases the whole frag_list chain as well */
	kfree_skb(skb);
	return NULL;
}
2769
/* Decode one configuration option at *ptr and advance the cursor.
 *
 * On return *type and *olen describe the option.  *val holds the
 * 1/2/4-byte value (read unaligned, little endian); for any other
 * length it holds a pointer to the raw option payload instead.
 * Returns the total number of bytes consumed.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
{
	struct l2cap_conf_opt *opt = *ptr;
	int len;

	len = L2CAP_CONF_OPT_SIZE + opt->len;
	*ptr += len;

	*type = opt->type;
	*olen = opt->len;

	switch (opt->len) {
	case 1:
		*val = *((u8 *) opt->val);
		break;

	case 2:
		*val = get_unaligned_le16(opt->val);
		break;

	case 4:
		*val = get_unaligned_le32(opt->val);
		break;

	default:
		/* Variable-length option: hand back a pointer */
		*val = (unsigned long) opt->val;
		break;
	}

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
	return len;
}
2802
Linus Torvalds1da177e2005-04-16 15:20:36 -07002803static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2804{
2805 struct l2cap_conf_opt *opt = *ptr;
2806
2807 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2808
2809 opt->type = type;
2810 opt->len = len;
2811
2812 switch (len) {
2813 case 1:
2814 *((u8 *) opt->val) = val;
2815 break;
2816
2817 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002818 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002819 break;
2820
2821 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002822 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002823 break;
2824
2825 default:
2826 memcpy(opt->val, (void *) val, len);
2827 break;
2828 }
2829
2830 *ptr += L2CAP_CONF_OPT_SIZE + len;
2831}
2832
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002833static void l2cap_ertm_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002834{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002835 struct delayed_work *delayed =
2836 container_of(work, struct delayed_work, work);
2837 struct l2cap_pinfo *pi =
2838 container_of(delayed, struct l2cap_pinfo, ack_work);
2839 struct sock *sk = (struct sock *)pi;
2840 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002841
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002842 BT_DBG("sk %p", sk);
2843
2844 if (!sk)
2845 return;
2846
2847 lock_sock(sk);
2848
2849 if (!l2cap_pi(sk)->conn) {
2850 release_sock(sk);
2851 return;
2852 }
2853
2854 frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
2855 l2cap_pi(sk)->last_acked_seq,
2856 l2cap_pi(sk));
2857
2858 if (frames_to_ack)
2859 l2cap_ertm_send_rr_or_rnr(sk, 0);
2860
2861 release_sock(sk);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002862}
2863
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002864static void l2cap_ertm_retrans_timeout(struct work_struct *work)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002865{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002866 struct delayed_work *delayed =
2867 container_of(work, struct delayed_work, work);
2868 struct l2cap_pinfo *pi =
2869 container_of(delayed, struct l2cap_pinfo, retrans_work);
2870 struct sock *sk = (struct sock *)pi;
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03002871
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002872 BT_DBG("sk %p", sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002873
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002874 if (!sk)
2875 return;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002876
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002877 lock_sock(sk);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03002878
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002879 if (!l2cap_pi(sk)->conn) {
2880 release_sock(sk);
2881 return;
2882 }
Gustavo F. Padovan39d5a3e2011-04-04 15:40:12 -03002883
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002884 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
2885 release_sock(sk);
2886}
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03002887
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002888static void l2cap_ertm_monitor_timeout(struct work_struct *work)
2889{
2890 struct delayed_work *delayed =
2891 container_of(work, struct delayed_work, work);
2892 struct l2cap_pinfo *pi =
2893 container_of(delayed, struct l2cap_pinfo, monitor_work);
2894 struct sock *sk = (struct sock *)pi;
2895
2896 BT_DBG("sk %p", sk);
2897
2898 if (!sk)
2899 return;
2900
2901 lock_sock(sk);
2902
2903 if (!l2cap_pi(sk)->conn) {
2904 release_sock(sk);
2905 return;
2906 }
2907
2908 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
2909
2910 release_sock(sk);
2911}
2912
2913static inline void l2cap_ertm_init(struct sock *sk)
2914{
2915 l2cap_pi(sk)->next_tx_seq = 0;
2916 l2cap_pi(sk)->expected_tx_seq = 0;
2917 l2cap_pi(sk)->expected_ack_seq = 0;
2918 l2cap_pi(sk)->unacked_frames = 0;
2919 l2cap_pi(sk)->buffer_seq = 0;
2920 l2cap_pi(sk)->frames_sent = 0;
2921 l2cap_pi(sk)->last_acked_seq = 0;
2922 l2cap_pi(sk)->sdu = NULL;
2923 l2cap_pi(sk)->sdu_last_frag = NULL;
2924 l2cap_pi(sk)->sdu_len = 0;
2925 atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
2926
2927 l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2928 l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2929
2930 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
2931 l2cap_pi(sk)->rx_state);
2932
2933 l2cap_pi(sk)->amp_id = 0;
2934 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
2935 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
2936 l2cap_pi(sk)->amp_move_reqseq = 0;
2937 l2cap_pi(sk)->amp_move_event = 0;
2938
2939 INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
2940 INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
2941 l2cap_ertm_retrans_timeout);
2942 INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
2943 l2cap_ertm_monitor_timeout);
2944 INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
2945 skb_queue_head_init(SREJ_QUEUE(sk));
2946 skb_queue_head_init(TX_QUEUE(sk));
2947
2948 l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
2949 l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
2950 l2cap_pi(sk)->remote_tx_win);
2951}
2952
2953void l2cap_ertm_destruct(struct sock *sk)
2954{
2955 l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
2956 l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
2957}
2958
/* Stop all three ERTM timers (ack, retransmission, monitor). */
void l2cap_ertm_shutdown(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);
}
2965
2966void l2cap_ertm_recv_done(struct sock *sk)
2967{
2968 lock_sock(sk);
2969
2970 if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) {
2971 release_sock(sk);
2972 return;
2973 }
2974
2975 /* Consume any queued incoming frames and update local busy status */
2976 if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
2977 l2cap_ertm_rx_queued_iframes(sk))
2978 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
2979 else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2980 l2cap_rmem_available(sk))
2981 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
2982
2983 release_sock(sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002984}
2985
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03002986static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2987{
2988 switch (mode) {
2989 case L2CAP_MODE_STREAMING:
2990 case L2CAP_MODE_ERTM:
2991 if (l2cap_mode_supported(mode, remote_feat_mask))
2992 return mode;
2993 /* fall through */
2994 default:
2995 return L2CAP_MODE_BASIC;
2996 }
2997}
2998
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002999static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003000{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003001 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
3002 (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
3003 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3004 pi->extended_control = 1;
3005 } else {
3006 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3007 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3008
3009 pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
3010 pi->extended_control = 0;
3011 }
3012}
3013
/* Combine the data rate of an existing aggregate flow spec (@cur) with
 * a newly added one (@new), writing the result to @agg.
 *
 * max_sdu == 0xFFFF or sdu_arr_time == 0xFFFFFFFF denotes an unknown
 * ("don't care") rate; any unknown input makes the aggregate unknown.
 */
static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *new,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		/* current flow spec has known rate */
		if ((new->max_sdu == 0xFFFF) ||
				(new->sdu_arr_time == 0xFFFFFFFF)) {
			/* new fs has unknown rate, so aggregate is unknown */
			agg->max_sdu = 0xFFFF;
			agg->sdu_arr_time = 0xFFFFFFFF;
		} else {
			/* new fs has known rate, so aggregate is known */
			u64 cur_rate;
			u64 new_rate;
			/* Rate = max_sdu / arrival time (time in microseconds) */
			cur_rate = cur->max_sdu * 1000000ULL;
			if (cur->sdu_arr_time)
				cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
			new_rate = new->max_sdu * 1000000ULL;
			if (new->sdu_arr_time)
				new_rate = div_u64(new_rate, new->sdu_arr_time);
			cur_rate = cur_rate + new_rate;
			/* NOTE(review): agg->max_sdu keeps cur's value while
			 * the arrival time is recomputed from the combined
			 * rate — confirm this is the intended aggregation.
			 */
			agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
				cur_rate);
		}
	}
}
3042
3043static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3044{
3045 struct hci_ext_fs tx_fs;
3046 struct hci_ext_fs rx_fs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003047
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003048 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003049
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003050 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3051 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3052 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3053 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3054 return 0;
3055
3056 l2cap_aggregate_fs(&chan->tx_fs,
3057 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3058 l2cap_aggregate_fs(&chan->rx_fs,
3059 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3060 hci_chan_modify(chan, &tx_fs, &rx_fs);
3061 return 1;
3062}
3063
/* Remove a departing flow spec's (@old) data rate from the aggregate
 * (@cur), writing the result to @agg.  Inverse of l2cap_aggregate_fs();
 * unknown rates (0xFFFF / 0xFFFFFFFF) are left untouched.
 */
static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *old,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		u64 cur_rate;
		u64 old_rate;
		cur_rate = cur->max_sdu * 1000000ULL;
		if (cur->sdu_arr_time)
			cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
		old_rate = old->max_sdu * 1000000ULL;
		if (old->sdu_arr_time)
			old_rate = div_u64(old_rate, old->sdu_arr_time);
		/* NOTE(review): if old_rate equals cur_rate the difference
		 * is zero and div64_u64 below divides by zero — confirm
		 * callers never remove the only known-rate flow spec here.
		 */
		cur_rate = cur_rate - old_rate;
		agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
			cur_rate);
	}
}
3083
3084static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3085{
3086 struct hci_ext_fs tx_fs;
3087 struct hci_ext_fs rx_fs;
3088
3089 BT_DBG("chan %p", chan);
3090
3091 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3092 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3093 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3094 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3095 return 0;
3096
3097 l2cap_deaggregate_fs(&chan->tx_fs,
3098 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3099 l2cap_deaggregate_fs(&chan->rx_fs,
3100 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3101 hci_chan_modify(chan, &tx_fs, &rx_fs);
3102 return 1;
3103}
3104
3105static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct l2cap_pinfo *pi)
3106{
3107 struct hci_dev *hdev;
3108 struct hci_conn *hcon;
3109 struct hci_chan *chan;
3110
3111 hdev = hci_dev_get(A2MP_HCI_ID(amp_id));
3112 if (!hdev)
3113 return NULL;
3114
3115 BT_DBG("hdev %s", hdev->name);
3116
3117 hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
3118 if (!hcon)
3119 return NULL;
3120
3121 chan = hci_chan_list_lookup_id(hdev, hcon->handle);
3122 if (chan) {
3123 l2cap_aggregate(chan, pi);
3124 hci_chan_hold(chan);
3125 return chan;
3126 }
3127
3128 if (bt_sk(pi)->parent) {
3129 /* Incoming connection */
3130 chan = hci_chan_accept(hcon,
3131 (struct hci_ext_fs *) &pi->local_fs,
3132 (struct hci_ext_fs *) &pi->remote_fs);
3133 } else {
3134 /* Outgoing connection */
3135 chan = hci_chan_create(hcon,
3136 (struct hci_ext_fs *) &pi->local_fs,
3137 (struct hci_ext_fs *) &pi->remote_fs);
3138 }
3139 return chan;
3140}
3141
/* Build an L2CAP configuration request for this channel into @data.
 *
 * On the first request the channel mode may be downgraded via
 * l2cap_select_mode() based on the peer's feature mask.  Depending on
 * the resulting mode, MTU, RFC, extended-window, extended-flow-spec
 * and FCS options are appended.  Returns the number of bytes written.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Only (re)select the mode on the very first exchange */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* STATE2_DEVICE pins the mode; don't renegotiate it */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an explicit basic-mode RFC option if the peer
		 * knows about ERTM or streaming at all.
		 */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		l2cap_setup_txwin(pi);
		/* The RFC option field is capped at the enhanced window;
		 * larger windows go in the extended-window option below.
		 */
		if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
			rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
		else
			rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->tx_win);
		}

		if (pi->amp_id) {
			/* default best effort extended flow spec */
			struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
					(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3259
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003260
/* Build a configuration request used to reconfigure an ERTM channel
 * after an AMP move.
 *
 * Retransmission/monitor timeouts are derived from the controller's
 * best-effort flush timeout when moving onto an AMP (amp_move_id set),
 * otherwise the BR/EDR defaults are used.  FCS is renegotiated when
 * the peer supports it.  Returns the number of bytes written, or
 * -ECONNREFUSED for channel modes that cannot be reconfigured.
 */
static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;
	u32 be_flush_to;

	BT_DBG("sk %p", sk);

	/* convert to milliseconds, round up */
	be_flush_to = (pi->conn->hcon->hdev->amp_be_flush_to + 999) / 1000;

	switch (pi->mode) {
	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		if (pi->amp_move_id) {
			/* Moving to AMP: scale timeouts to the flush timeout */
			rfc.retrans_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
			rfc.monitor_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
		} else {
			rfc.retrans_timeout =
					cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
					cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		}
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		break;

	default:
		return -ECONNREFUSED;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {

		/* TODO assign fcs for br/edr based on socket config option */
		if (pi->amp_move_id)
			pi->local_conf.fcs = L2CAP_FCS_NONE;
		else
			pi->local_conf.fcs = L2CAP_FCS_CRC16;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
					pi->local_conf.fcs);

		pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3322
3323static int l2cap_parse_conf_req(struct sock *sk, void *data)
3324{
3325 struct l2cap_pinfo *pi = l2cap_pi(sk);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003326 struct l2cap_conf_rsp *rsp = data;
3327 void *ptr = rsp->data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003328 void *req = pi->conf_req;
3329 int len = pi->conf_len;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003330 int type, hint, olen;
3331 unsigned long val;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003332 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003333 struct l2cap_conf_ext_fs fs;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003334 u16 mtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003335 u16 result = L2CAP_CONF_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003336
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003337 BT_DBG("sk %p", sk);
Marcel Holtmann820ae1b2006-11-18 22:15:00 +01003338
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003339 while (len >= L2CAP_CONF_OPT_SIZE) {
3340 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341
Gustavo F. Padovan589d2742009-04-20 01:31:07 -03003342 hint = type & L2CAP_CONF_HINT;
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07003343 type &= L2CAP_CONF_MASK;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003344
3345 switch (type) {
3346 case L2CAP_CONF_MTU:
Marcel Holtmann861d6882007-10-20 13:37:06 +02003347 mtu = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003348 break;
3349
3350 case L2CAP_CONF_FLUSH_TO:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003351 pi->flush_to = val;
3352 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3353 result = L2CAP_CONF_UNACCEPT;
3354 else
3355 pi->remote_conf.flush_to = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003356 break;
3357
3358 case L2CAP_CONF_QOS:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003359 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3360 result = L2CAP_CONF_UNACCEPT;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003361 break;
3362
Marcel Holtmann6464f352007-10-20 13:39:51 +02003363 case L2CAP_CONF_RFC:
3364 if (olen == sizeof(rfc))
3365 memcpy(&rfc, (void *) val, olen);
3366 break;
3367
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003368 case L2CAP_CONF_FCS:
3369 if (val == L2CAP_FCS_NONE)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003370 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
3371 pi->remote_conf.fcs = val;
3372 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003373
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003374 case L2CAP_CONF_EXT_FS:
3375 if (olen == sizeof(fs)) {
3376 pi->conf_state |= L2CAP_CONF_EFS_RECV;
3377 if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
3378 result = L2CAP_CONF_UNACCEPT;
3379 break;
3380 }
3381 memcpy(&fs, (void *) val, olen);
3382 if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
3383 result = L2CAP_CONF_FLOW_SPEC_REJECT;
3384 break;
3385 }
3386 pi->remote_conf.flush_to =
3387 le32_to_cpu(fs.flush_to);
3388 pi->remote_fs.id = fs.id;
3389 pi->remote_fs.type = fs.type;
3390 pi->remote_fs.max_sdu =
3391 le16_to_cpu(fs.max_sdu);
3392 pi->remote_fs.sdu_arr_time =
3393 le32_to_cpu(fs.sdu_arr_time);
3394 pi->remote_fs.acc_latency =
3395 le32_to_cpu(fs.acc_latency);
3396 pi->remote_fs.flush_to =
3397 le32_to_cpu(fs.flush_to);
3398 }
3399 break;
3400
3401 case L2CAP_CONF_EXT_WINDOW:
3402 pi->extended_control = 1;
3403 pi->remote_tx_win = val;
3404 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3405 pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003406 break;
3407
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003408 default:
3409 if (hint)
3410 break;
3411
3412 result = L2CAP_CONF_UNKNOWN;
3413 *((u8 *) ptr++) = type;
3414 break;
3415 }
3416 }
3417
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003418 if (pi->num_conf_rsp || pi->num_conf_req > 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003419 goto done;
3420
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003421 switch (pi->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003422 case L2CAP_MODE_STREAMING:
3423 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003424 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
3425 pi->mode = l2cap_select_mode(rfc.mode,
3426 pi->conn->feat_mask);
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003427 break;
3428 }
3429
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003430 if (pi->mode != rfc.mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003431 return -ECONNREFUSED;
Gustavo F. Padovan742e5192010-06-08 19:09:48 -03003432
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003433 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003434 }
3435
3436done:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003437 if (pi->mode != rfc.mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003438 result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003439 rfc.mode = pi->mode;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003440
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003441 if (pi->num_conf_rsp == 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003442 return -ECONNREFUSED;
3443
3444 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3445 sizeof(rfc), (unsigned long) &rfc);
3446 }
3447
3448
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003449 if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
3450 !(pi->conf_state & L2CAP_CONF_EFS_RECV))
3451 return -ECONNREFUSED;
3452
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003453 if (result == L2CAP_CONF_SUCCESS) {
3454 /* Configure output options and let the other side know
3455 * which ones we don't like. */
3456
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003457 if (mtu < L2CAP_DEFAULT_MIN_MTU) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003458 result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003459 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003460 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003461 else {
3462 pi->omtu = mtu;
3463 pi->conf_state |= L2CAP_CONF_MTU_DONE;
3464 }
3465 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003466
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003467 switch (rfc.mode) {
3468 case L2CAP_MODE_BASIC:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003469 pi->fcs = L2CAP_FCS_NONE;
3470 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003471 break;
3472
3473 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003474 if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
3475 pi->remote_tx_win = rfc.txwin_size;
Mat Martineau86b1b262010-08-05 15:54:22 -07003476
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003477 pi->remote_max_tx = rfc.max_transmit;
Gustavo F. Padovan1c762152010-05-01 16:15:40 -03003478
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003479 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003480
Gustavo F. Padovan10467e92010-05-01 16:15:40 -03003481 rfc.retrans_timeout =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003482 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
Gustavo F. Padovan10467e92010-05-01 16:15:40 -03003483 rfc.monitor_timeout =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003484 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003485
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003486 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003487
3488 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3489 sizeof(rfc), (unsigned long) &rfc);
3490
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003491 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3492 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
3493 sizeof(fs), (unsigned long) &fs);
3494
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003495 break;
3496
3497 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003498 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan1c762152010-05-01 16:15:40 -03003499
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003500 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003501
3502 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3503 sizeof(rfc), (unsigned long) &rfc);
3504
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003505 break;
3506
3507 default:
Marcel Holtmann6464f352007-10-20 13:39:51 +02003508 result = L2CAP_CONF_UNACCEPT;
3509
3510 memset(&rfc, 0, sizeof(rfc));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003511 rfc.mode = pi->mode;
3512 }
3513
3514 if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
3515 !(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
3516 pi->conf_state |= L2CAP_CONF_PEND_SENT;
3517 result = L2CAP_CONF_PENDING;
3518
3519 if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
3520 pi->amp_id) {
Peter Krystadf453bb32011-07-19 17:23:34 -07003521 struct hci_chan *chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003522 /* Trigger logical link creation only on AMP */
3523
Peter Krystadf453bb32011-07-19 17:23:34 -07003524 chan = l2cap_chan_admit(pi->amp_id, pi);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003525 if (!chan)
3526 return -ECONNREFUSED;
3527
3528 chan->l2cap_sk = sk;
3529 if (chan->state == BT_CONNECTED)
3530 l2cap_create_cfm(chan, 0);
3531 }
Marcel Holtmann6464f352007-10-20 13:39:51 +02003532 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003533
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003534 if (result == L2CAP_CONF_SUCCESS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003535 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003536 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003537 rsp->scid = cpu_to_le16(pi->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003538 rsp->result = cpu_to_le16(result);
3539 rsp->flags = cpu_to_le16(0x0000);
3540
3541 return ptr - data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003542}
3543
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003544static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003545{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003546 struct l2cap_pinfo *pi = l2cap_pi(sk);
3547 struct l2cap_conf_rsp *rsp = data;
3548 void *ptr = rsp->data;
3549 void *req = pi->conf_req;
3550 int len = pi->conf_len;
3551 int type, hint, olen;
3552 unsigned long val;
3553 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
3554 struct l2cap_conf_ext_fs fs;
3555 u16 mtu = pi->omtu;
3556 u16 tx_win = pi->remote_tx_win;
3557 u16 result = L2CAP_CONF_SUCCESS;
3558
3559 BT_DBG("sk %p", sk);
3560
3561 while (len >= L2CAP_CONF_OPT_SIZE) {
3562 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
3563
3564 hint = type & L2CAP_CONF_HINT;
3565 type &= L2CAP_CONF_MASK;
3566
3567 switch (type) {
3568 case L2CAP_CONF_MTU:
3569 mtu = val;
3570 break;
3571
3572 case L2CAP_CONF_FLUSH_TO:
3573 if (pi->amp_move_id)
3574 result = L2CAP_CONF_UNACCEPT;
3575 else
3576 pi->remote_conf.flush_to = val;
3577 break;
3578
3579 case L2CAP_CONF_QOS:
3580 if (pi->amp_move_id)
3581 result = L2CAP_CONF_UNACCEPT;
3582 break;
3583
3584 case L2CAP_CONF_RFC:
3585 if (olen == sizeof(rfc))
3586 memcpy(&rfc, (void *) val, olen);
3587 if (pi->mode != rfc.mode ||
3588 rfc.mode == L2CAP_MODE_BASIC)
3589 result = L2CAP_CONF_UNACCEPT;
3590 break;
3591
3592 case L2CAP_CONF_FCS:
3593 pi->remote_conf.fcs = val;
3594 break;
3595
3596 case L2CAP_CONF_EXT_FS:
3597 if (olen == sizeof(fs)) {
3598 memcpy(&fs, (void *) val, olen);
3599 if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
3600 result = L2CAP_CONF_FLOW_SPEC_REJECT;
3601 else {
3602 pi->remote_conf.flush_to =
3603 le32_to_cpu(fs.flush_to);
3604 }
3605 }
3606 break;
3607
3608 case L2CAP_CONF_EXT_WINDOW:
3609 tx_win = val;
3610 break;
3611
3612 default:
3613 if (hint)
3614 break;
3615
3616 result = L2CAP_CONF_UNKNOWN;
3617 *((u8 *) ptr++) = type;
3618 break;
3619 }
3620 }
3621
3622 BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
3623 result, pi->mode, rfc.mode);
3624
3625 if (result == L2CAP_CONF_SUCCESS) {
3626 /* Configure output options and let the other side know
3627 * which ones we don't like. */
3628
3629 /* Don't allow mtu to decrease. */
3630 if (mtu < pi->omtu)
3631 result = L2CAP_CONF_UNACCEPT;
3632
3633 BT_DBG("mtu %d omtu %d", mtu, pi->omtu);
3634
3635 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
3636
3637 /* Don't allow extended transmit window to change. */
3638 if (tx_win != pi->remote_tx_win) {
3639 result = L2CAP_CONF_UNACCEPT;
3640 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
3641 pi->remote_tx_win);
3642 }
3643
3644 if (rfc.mode == L2CAP_MODE_ERTM) {
3645 pi->remote_conf.retrans_timeout =
3646 le16_to_cpu(rfc.retrans_timeout);
3647 pi->remote_conf.monitor_timeout =
3648 le16_to_cpu(rfc.monitor_timeout);
3649
3650 BT_DBG("remote conf monitor timeout %d",
3651 pi->remote_conf.monitor_timeout);
3652
3653 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3654 sizeof(rfc), (unsigned long) &rfc);
3655 }
3656
3657 }
3658
3659 if (result != L2CAP_CONF_SUCCESS)
3660 goto done;
3661
3662 pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs ;
3663
3664 if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG) {
3665 pi->flush_to = pi->remote_conf.flush_to;
3666 pi->retrans_timeout = pi->remote_conf.retrans_timeout;
3667
3668 if (pi->amp_move_id)
3669 pi->monitor_timeout = pi->remote_conf.monitor_timeout;
3670 else
3671 pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
3672 BT_DBG("mode %d monitor timeout %d",
3673 pi->mode, pi->monitor_timeout);
3674
3675 }
3676
3677done:
3678 rsp->scid = cpu_to_le16(pi->dcid);
3679 rsp->result = cpu_to_le16(result);
3680 rsp->flags = cpu_to_le16(0x0000);
3681
3682 return ptr - data;
3683}
3684
3685static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
3686{
3687 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003688 struct l2cap_conf_req *req = data;
3689 void *ptr = req->data;
3690 int type, olen;
3691 unsigned long val;
3692 struct l2cap_conf_rfc rfc;
3693
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003694 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003695
3696 while (len >= L2CAP_CONF_OPT_SIZE) {
3697 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3698
3699 switch (type) {
3700 case L2CAP_CONF_MTU:
3701 if (val < L2CAP_DEFAULT_MIN_MTU) {
3702 *result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003703 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003704 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003705 pi->imtu = val;
3706 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003707 break;
3708
3709 case L2CAP_CONF_FLUSH_TO:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003710 pi->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003711 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003712 2, pi->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003713 break;
3714
3715 case L2CAP_CONF_RFC:
3716 if (olen == sizeof(rfc))
3717 memcpy(&rfc, (void *)val, olen);
3718
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003719 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
3720 rfc.mode != pi->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003721 return -ECONNREFUSED;
3722
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003723 pi->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003724
3725 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3726 sizeof(rfc), (unsigned long) &rfc);
3727 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003728
3729 case L2CAP_CONF_EXT_WINDOW:
3730 pi->tx_win = val;
3731
3732 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3733 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3734
3735 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
3736 2, pi->tx_win);
3737 break;
3738
3739 default:
3740 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003741 }
3742 }
3743
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003744 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003745 return -ECONNREFUSED;
3746
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003747 pi->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003748
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003749 if (*result == L2CAP_CONF_SUCCESS) {
3750 switch (rfc.mode) {
3751 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003752 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3753 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3754 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003755 break;
3756 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003757 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003758 }
3759 }
3760
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003761 req->dcid = cpu_to_le16(pi->dcid);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003762 req->flags = cpu_to_le16(0x0000);
3763
3764 return ptr - data;
3765}
3766
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003767static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003768{
3769 struct l2cap_conf_rsp *rsp = data;
3770 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003771
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003772 BT_DBG("sk %p", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003774 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003775 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003776 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003777
3778 return ptr - data;
3779}
3780
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003781static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003782{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003783 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003784 int type, olen;
3785 unsigned long val;
3786 struct l2cap_conf_rfc rfc;
3787
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003788 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003789
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003790 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003791 return;
3792
3793 while (len >= L2CAP_CONF_OPT_SIZE) {
3794 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3795
3796 switch (type) {
3797 case L2CAP_CONF_RFC:
3798 if (olen == sizeof(rfc))
3799 memcpy(&rfc, (void *)val, olen);
3800 goto done;
3801 }
3802 }
3803
3804done:
3805 switch (rfc.mode) {
3806 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003807 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3808 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3809 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003810 break;
3811 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003812 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003813 }
3814}
3815
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003816static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
3817{
3818 struct l2cap_pinfo *pi = l2cap_pi(sk);
3819 int type, olen;
3820 unsigned long val;
3821 struct l2cap_conf_ext_fs fs;
3822
3823 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
3824
3825 while (len >= L2CAP_CONF_OPT_SIZE) {
3826 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3827 if ((type == L2CAP_CONF_EXT_FS) &&
3828 (olen == sizeof(struct l2cap_conf_ext_fs))) {
3829 memcpy(&fs, (void *)val, olen);
3830 pi->local_fs.id = fs.id;
3831 pi->local_fs.type = fs.type;
3832 pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
3833 pi->local_fs.sdu_arr_time =
3834 le32_to_cpu(fs.sdu_arr_time);
3835 pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
3836 pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
3837 break;
3838 }
3839 }
3840
3841}
3842
3843static int l2cap_finish_amp_move(struct sock *sk)
3844{
3845 struct l2cap_pinfo *pi;
3846 int err;
3847
3848 BT_DBG("sk %p", sk);
3849
3850 pi = l2cap_pi(sk);
3851
3852 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
3853 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
3854
3855 if (pi->ampcon)
3856 pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
3857 else
3858 pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
3859
3860 err = l2cap_setup_resegment(sk);
3861
3862 return err;
3863}
3864
/* Handle the configuration response received during an AMP channel-move
 * reconfiguration.
 *
 * Validates the (optional) RFC option against the current mode, stops all
 * ERTM timers, and then either answers the remote's poll (acceptor role)
 * or issues an explicit poll and waits for the F-flag (initiator role).
 *
 * Returns 0 on success, -ECONNREFUSED if no reconfiguration was in
 * progress or the proposed mode is invalid.
 */
static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
					u16 result)
{
	int err = 0;
	struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);

	/* A response is only meaningful while a reconfiguration is active. */
	if (pi->reconf_state == L2CAP_RECONF_NONE)
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		/* Scan the options for the RFC entry only. */
		while (len >= L2CAP_CONF_OPT_SIZE) {
			int type, olen;
			unsigned long val;

			len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

			if (type == L2CAP_CONF_RFC) {
				if (olen == sizeof(rfc))
					memcpy(&rfc, (void *)val, olen);
				/* Reject a mode that is neither the current
				 * mode nor ERTM. */
				if (rfc.mode != pi->mode &&
					rfc.mode != L2CAP_MODE_ERTM) {
					err = -ECONNREFUSED;
					goto done;
				}
				break;
			}
		}
	}

done:
	/* Timers are stopped unconditionally, even on the error path. */
	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);

	if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;

		/* Respond to poll */
		/* NOTE(review): this overwrites any err set above — the
		 * poll answer's result takes precedence in this branch. */
		err = l2cap_answer_move_poll(sk);

	} else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {

		/* If moving to BR/EDR, use default timeout defined by
		 * the spec */
		if (pi->amp_move_id == 0)
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

		/* Initiator: poll the remote and wait for the F-flag
		 * before resuming data transfer. */
		if (pi->mode == L2CAP_MODE_ERTM) {
			l2cap_ertm_tx(sk, NULL, NULL,
					L2CAP_ERTM_EVENT_EXPLICIT_POLL);
			pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
		}
	}

	return err;
}
3924
3925
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003926static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3927{
3928 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
3929
3930 if (rej->reason != 0x0000)
3931 return 0;
3932
3933 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3934 cmd->ident == conn->info_ident) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003935 del_timer(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003936
3937 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003938 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003939
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003940 l2cap_conn_start(conn);
3941 }
3942
3943 return 0;
3944}
3945
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003946static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
3947 struct l2cap_cmd_hdr *cmd,
3948 u8 *data, u8 rsp_code,
3949 u8 amp_id)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003950{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003951 struct l2cap_chan_list *list = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003952 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
3953 struct l2cap_conn_rsp rsp;
Nathan Holsteind793fe82010-10-15 11:54:02 -04003954 struct sock *parent, *sk = NULL;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003955 int result, status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003956
3957 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003958 __le16 psm = req->psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003959
3960 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
3961
3962 /* Check if we have socket listening on psm */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003963 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
3964 if (!parent) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07003965 result = L2CAP_CR_BAD_PSM;
3966 goto sendresp;
3967 }
3968
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00003969 bh_lock_sock(parent);
3970
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003971 /* Check if the ACL is secure enough (if not SDP) */
3972 if (psm != cpu_to_le16(0x0001) &&
3973 !hci_conn_check_link_mode(conn->hcon)) {
Marcel Holtmann2950f212009-02-12 14:02:50 +01003974 conn->disc_reason = 0x05;
Marcel Holtmanne7c29cb2008-09-09 07:19:20 +02003975 result = L2CAP_CR_SEC_BLOCK;
3976 goto response;
3977 }
3978
Linus Torvalds1da177e2005-04-16 15:20:36 -07003979 result = L2CAP_CR_NO_MEM;
3980
3981 /* Check for backlog size */
3982 if (sk_acceptq_is_full(parent)) {
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09003983 BT_DBG("backlog full %d", parent->sk_ack_backlog);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003984 goto response;
3985 }
3986
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003987 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
3988 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003989 goto response;
3990
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003991 write_lock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003992
3993 /* Check if we already have channel with that dcid */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003994 if (__l2cap_get_chan_by_dcid(list, scid)) {
3995 write_unlock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003996 sock_set_flag(sk, SOCK_ZAPPED);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003997 l2cap_sock_kill(sk);
3998 sk = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003999 goto response;
4000 }
4001
4002 hci_conn_hold(conn->hcon);
4003
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004004 l2cap_sock_init(sk, parent);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004005 bacpy(&bt_sk(sk)->src, conn->src);
4006 bacpy(&bt_sk(sk)->dst, conn->dst);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004007 l2cap_pi(sk)->psm = psm;
4008 l2cap_pi(sk)->dcid = scid;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004009
Gustavo F. Padovand1010242011-03-25 00:39:48 -03004010 bt_accept_enqueue(parent, sk);
4011
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004012 __l2cap_chan_add(conn, sk);
4013 dcid = l2cap_pi(sk)->scid;
4014 l2cap_pi(sk)->amp_id = amp_id;
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004015
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004016 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004017
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004018 l2cap_pi(sk)->ident = cmd->ident;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004019
Marcel Holtmann984947d2009-02-06 23:35:19 +01004020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004021 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004022 if (bt_sk(sk)->defer_setup) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004023 sk->sk_state = BT_CONNECT2;
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004024 result = L2CAP_CR_PEND;
4025 status = L2CAP_CS_AUTHOR_PEND;
4026 parent->sk_data_ready(parent, 0);
4027 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004028 /* Force pending result for AMP controllers.
4029 * The connection will succeed after the
4030 * physical link is up. */
4031 if (amp_id) {
4032 sk->sk_state = BT_CONNECT2;
4033 result = L2CAP_CR_PEND;
4034 } else {
4035 sk->sk_state = BT_CONFIG;
4036 result = L2CAP_CR_SUCCESS;
4037 }
Marcel Holtmannf66dc812009-01-15 21:57:00 +01004038 status = L2CAP_CS_NO_INFO;
4039 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004040 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004041 sk->sk_state = BT_CONNECT2;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004042 result = L2CAP_CR_PEND;
4043 status = L2CAP_CS_AUTHEN_PEND;
4044 }
4045 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004046 sk->sk_state = BT_CONNECT2;
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004047 result = L2CAP_CR_PEND;
4048 status = L2CAP_CS_NO_INFO;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004049 }
4050
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004051 write_unlock_bh(&list->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004052
4053response:
4054 bh_unlock_sock(parent);
4055
4056sendresp:
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07004057 rsp.scid = cpu_to_le16(scid);
4058 rsp.dcid = cpu_to_le16(dcid);
4059 rsp.result = cpu_to_le16(result);
4060 rsp.status = cpu_to_le16(status);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004061 l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004062
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004063 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +02004064 struct l2cap_info_req info;
4065 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4066
4067 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
4068 conn->info_ident = l2cap_get_ident(conn);
4069
4070 mod_timer(&conn->info_timer, jiffies +
4071 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
4072
4073 l2cap_send_cmd(conn, conn->info_ident,
4074 L2CAP_INFO_REQ, sizeof(info), &info);
4075 }
4076
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004077 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004078 result == L2CAP_CR_SUCCESS) {
4079 u8 buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004080 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004081 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004082 l2cap_build_conf_req(sk, buf), buf);
4083 l2cap_pi(sk)->num_conf_req++;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -03004084 }
4085
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004086 return sk;
4087}
4088
4089static inline int l2cap_connect_req(struct l2cap_conn *conn,
4090 struct l2cap_cmd_hdr *cmd, u8 *data)
4091{
4092 l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004093 return 0;
4094}
4095
/* Handle a connection response for a channel we initiated.
 *
 * On success, records the remote CID and sends our first configuration
 * request; on a pending result, marks the channel as connect-pending;
 * on any other result, tears the channel down (or schedules teardown if
 * userspace holds the socket lock).
 *
 * Returns 0 on success, -EFAULT if no matching channel is found.
 *
 * NOTE(review): the socket lock appears to be acquired inside
 * l2cap_get_chan_by_scid()/l2cap_get_chan_by_ident() and released by the
 * bh_unlock_sock() at the end — confirm against those helpers.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	/* A pending response may not carry our CID yet; fall back to
	 * matching on the command identifier. */
	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Don't send a second request if one is already out. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			/* Defer teardown briefly via the socket timer. */
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
4157
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004158static inline void set_default_fcs(struct l2cap_pinfo *pi)
Mat Martineau8c462b62010-08-24 15:35:42 -07004159{
4160 /* FCS is enabled only in ERTM or streaming mode, if one or both
4161 * sides request it.
4162 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004163 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
4164 pi->fcs = L2CAP_FCS_NONE;
4165 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
4166 pi->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07004167}
4168
/* Handle an incoming L2CAP Configuration Request, including the special
 * case of a reconfiguration triggered by a channel move between BR/EDR
 * and an AMP controller.
 *
 * Option data may arrive fragmented (continuation flag 0x0001 in
 * 'flags'); fragments are accumulated in conf_req/conf_len until the
 * final fragment, then parsed in one pass.  When both directions are
 * configured (OUTPUT_DONE and INPUT_DONE) the channel transitions to
 * BT_CONNECTED.
 *
 * Note: cmd_len arrives already converted to host byte order by the
 * signalling dispatcher.  The channel lookup returns with the socket
 * bh-locked; every exit path goes through 'unlock'.
 *
 * Returns 0, or -ENOENT if dcid does not match any channel.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rspbuf[64];
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
	struct sock *sk;
	int len;
	u8 amp_move_reconf = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
		"reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
		sk->sk_state, l2cap_pi(sk)->rx_state,
		l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
		l2cap_pi(sk)->amp_move_id);

	/* Detect a reconfig request due to channel move between
	 * BR/EDR and AMP
	 */
	if (sk->sk_state == BT_CONNECTED &&
		l2cap_pi(sk)->rx_state ==
			L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;

	if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
		amp_move_reconf = 1;

	/* A plain config request is only legal in BT_CONFIG; anything
	 * else (except a move reconfig) is rejected with reason 0x0002
	 * (invalid CID in request).
	 */
	if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_REJECT, flags), rspbuf);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
		goto unlock;
	}

	/* Complete config.  Parsers fill rspbuf and return the response
	 * length, or a negative value on bad options.
	 */
	if (!amp_move_reconf)
		len = l2cap_parse_conf_req(sk, rspbuf);
	else
		len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);

	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_pi(sk)->conf_ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
			rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
			!l2cap_pi(sk)->amp_id) {
		/* Send success response right after pending if using
		 * lockstep config on BR/EDR
		 */
		rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
	}

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	/* A move reconfig never advances the normal config state machine */
	if (amp_move_reconf)
		goto unlock;

	l2cap_pi(sk)->num_conf_rsp++;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: bring the channel up */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
			l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* Peer configured us first; send our own config request now */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
4296
4297static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4298{
4299 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
4300 u16 scid, flags, result;
4301 struct sock *sk;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004302 struct l2cap_pinfo *pi;
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03004303 int len = cmd->len - sizeof(*rsp);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004304
4305 scid = __le16_to_cpu(rsp->scid);
4306 flags = __le16_to_cpu(rsp->flags);
4307 result = __le16_to_cpu(rsp->result);
4308
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03004309 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
4310 scid, flags, result);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004311
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004312 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
4313 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004314 return 0;
4315
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004316 pi = l2cap_pi(sk);
4317
4318 if (pi->reconf_state != L2CAP_RECONF_NONE) {
4319 l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
4320 goto done;
4321 }
Gustavo F. Padovan48454072011-03-25 00:22:30 -03004322
Linus Torvalds1da177e2005-04-16 15:20:36 -07004323 switch (result) {
4324 case L2CAP_CONF_SUCCESS:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004325 if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
4326 !(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
4327 /* Lockstep procedure requires a pending response
4328 * before success.
4329 */
4330 l2cap_send_disconn_req(conn, sk, ECONNRESET);
4331 goto done;
4332 }
4333
4334 l2cap_conf_rfc_get(sk, rsp->data, len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004335 break;
4336
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004337 case L2CAP_CONF_PENDING:
4338 if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
4339 l2cap_send_disconn_req(conn, sk, ECONNRESET);
4340 goto done;
4341 }
4342
4343 l2cap_conf_rfc_get(sk, rsp->data, len);
4344
4345 pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;
4346
Peter Krystadf453bb32011-07-19 17:23:34 -07004347 l2cap_conf_ext_fs_get(sk, rsp->data, len);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004348
4349 if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
4350 struct hci_chan *chan;
4351
4352 /* Already sent a 'pending' response, so set up
4353 * the logical link now
4354 */
Peter Krystadf453bb32011-07-19 17:23:34 -07004355 chan = l2cap_chan_admit(pi->amp_id, pi);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004356 if (!chan) {
4357 l2cap_send_disconn_req(pi->conn, sk,
4358 ECONNRESET);
4359 goto done;
4360 }
4361
4362 chan->l2cap_sk = sk;
4363 if (chan->state == BT_CONNECTED)
4364 l2cap_create_cfm(chan, 0);
4365 }
4366
4367 goto done;
4368
Linus Torvalds1da177e2005-04-16 15:20:36 -07004369 case L2CAP_CONF_UNACCEPT:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004370 if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004371 char req[64];
4372
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004373 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004374 l2cap_send_disconn_req(conn, sk, ECONNRESET);
Andrei Emeltchenkoc2c77ec2010-03-19 10:26:28 +02004375 goto done;
4376 }
4377
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004378 /* throw out any old stored conf requests */
4379 result = L2CAP_CONF_SUCCESS;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004380 len = l2cap_parse_conf_rsp(sk, rsp->data,
4381 len, req, &result);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004382 if (len < 0) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004383 l2cap_send_disconn_req(conn, sk, ECONNRESET);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004384 goto done;
4385 }
4386
4387 l2cap_send_cmd(conn, l2cap_get_ident(conn),
4388 L2CAP_CONF_REQ, len, req);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004389 pi->num_conf_req++;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03004390 if (result != L2CAP_CONF_SUCCESS)
4391 goto done;
4392 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004393 }
4394
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09004395 default:
Marcel Holtmannb1235d72008-07-14 20:13:54 +02004396 sk->sk_err = ECONNRESET;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004397 l2cap_sock_set_timer(sk, HZ * 5);
4398 l2cap_send_disconn_req(conn, sk, ECONNRESET);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004399 goto done;
4400 }
4401
4402 if (flags & 0x01)
4403 goto done;
4404
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004405 pi->conf_state |= L2CAP_CONF_INPUT_DONE;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004406
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004407 if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
4408 set_default_fcs(pi);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004409
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004410 sk->sk_state = BT_CONNECTED;
4411
4412 if (pi->mode == L2CAP_MODE_ERTM ||
4413 pi->mode == L2CAP_MODE_STREAMING)
4414 l2cap_ertm_init(sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03004415
Linus Torvalds1da177e2005-04-16 15:20:36 -07004416 l2cap_chan_ready(sk);
4417 }
4418
4419done:
4420 bh_unlock_sock(sk);
4421 return 0;
4422}
4423
/* Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response, stop ERTM machinery, and tear the channel
 * down.  If the socket is currently locked by a user-space caller, the
 * teardown is deferred via a short timer instead of deleting the
 * channel out from under the user.  Always returns 0 (silently ignores
 * an unknown dcid).
 *
 * Locking: l2cap_get_chan_by_scid returns with the socket bh-locked;
 * both exit paths call bh_unlock_sock.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo our CID pair back to the peer (swapped perspective) */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Only do cleanup if a disconnect request was not sent already */
	if (sk->sk_state != BT_DISCONN) {
		sk->sk_shutdown = SHUTDOWN_MASK;

		skb_queue_purge(TX_QUEUE(sk));

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
			/* Drop pending retransmissions and stop the
			 * ERTM ack/retransmit/monitor timers
			 */
			skb_queue_purge(SREJ_QUEUE(sk));

			__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
			__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
			__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
		}
	}

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* retry the teardown shortly via the socket timer */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
4475
/* Handle an incoming L2CAP Disconnection Response: the peer has
 * acknowledged our disconnect, so delete the channel.  As in
 * l2cap_disconnect_req, a user-locked socket defers the deletion via a
 * short timer.  Always returns 0 (unknown scid is ignored).
 *
 * Locking: l2cap_get_chan_by_scid returns with the socket bh-locked.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		/* retry the teardown shortly via the socket timer */
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	/* err == 0: this is a clean, locally-initiated disconnect */
	l2cap_chan_del(sk, 0);
	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
4506
4507static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4508{
4509 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004510 u16 type;
4511
4512 type = __le16_to_cpu(req->type);
4513
4514 BT_DBG("type 0x%4.4x", type);
4515
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004516 if (type == L2CAP_IT_FEAT_MASK) {
4517 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004518 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004519 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4520 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4521 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004522 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004523 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004524 | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004525 put_unaligned_le32(feat_mask, rsp->data);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004526 l2cap_send_cmd(conn, cmd->ident,
4527 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004528 } else if (type == L2CAP_IT_FIXED_CHAN) {
4529 u8 buf[12];
4530 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4531 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4532 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4533 memcpy(buf + 4, l2cap_fixed_chan, 8);
4534 l2cap_send_cmd(conn, cmd->ident,
4535 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004536 } else {
4537 struct l2cap_info_rsp rsp;
4538 rsp.type = cpu_to_le16(type);
4539 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4540 l2cap_send_cmd(conn, cmd->ident,
4541 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
4542 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004543
4544 return 0;
4545}
4546
/* Handle an incoming L2CAP Information Response during the per-connection
 * information exchange: record the peer's feature mask, optionally chase
 * it with a fixed-channel query, and once the exchange is complete kick
 * l2cap_conn_start() to proceed with pending channels.  Always returns 0.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	/* Response arrived in time; stop the info-request timeout */
	del_timer(&conn->info_timer);

	if (result != L2CAP_IR_SUCCESS) {
		/* Peer refused the query: treat the exchange as done and
		 * continue without the information
		 */
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			/* Peer supports fixed channels: query the bitmap
			 * before finishing the exchange
			 */
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* First octet of the bitmap covers the fixed channels we
		 * care about
		 */
		conn->fc_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4600
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004601static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
4602 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
4603{
4604 struct l2cap_move_chan_req req;
4605 u8 ident;
4606
4607 BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
4608 (int) dest_amp_id);
4609
4610 ident = l2cap_get_ident(conn);
4611 if (pi)
4612 pi->ident = ident;
4613
4614 req.icid = cpu_to_le16(icid);
4615 req.dest_amp_id = dest_amp_id;
4616
4617 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
4618}
4619
4620static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4621 u16 icid, u16 result)
4622{
4623 struct l2cap_move_chan_rsp rsp;
4624
4625 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4626
4627 rsp.icid = cpu_to_le16(icid);
4628 rsp.result = cpu_to_le16(result);
4629
4630 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4631}
4632
4633static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4634 struct l2cap_pinfo *pi, u16 icid, u16 result)
4635{
4636 struct l2cap_move_chan_cfm cfm;
4637 u8 ident;
4638
4639 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4640
4641 ident = l2cap_get_ident(conn);
4642 if (pi)
4643 pi->ident = ident;
4644
4645 cfm.icid = cpu_to_le16(icid);
4646 cfm.result = cpu_to_le16(result);
4647
4648 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4649}
4650
4651static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4652 u16 icid)
4653{
4654 struct l2cap_move_chan_cfm_rsp rsp;
4655
4656 BT_DBG("icid %d", (int) icid);
4657
4658 rsp.icid = cpu_to_le16(icid);
4659 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4660}
4661
4662static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
4663 struct l2cap_cmd_hdr *cmd, u8 *data)
4664{
4665 struct l2cap_create_chan_req *req =
4666 (struct l2cap_create_chan_req *) data;
4667 struct sock *sk;
4668 u16 psm, scid;
4669
4670 psm = le16_to_cpu(req->psm);
4671 scid = le16_to_cpu(req->scid);
4672
4673 BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
4674 (int) req->amp_id);
4675
4676 if (req->amp_id) {
4677 struct hci_dev *hdev;
4678
4679 /* Validate AMP controller id */
4680 hdev = hci_dev_get(A2MP_HCI_ID(req->amp_id));
4681 if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
4682 struct l2cap_create_chan_rsp rsp;
4683
4684 rsp.dcid = 0;
4685 rsp.scid = cpu_to_le16(scid);
4686 rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
4687 rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;
4688
4689 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
4690 sizeof(rsp), &rsp);
4691
4692 if (hdev)
4693 hci_dev_put(hdev);
4694
4695 return 0;
4696 }
4697
4698 hci_dev_put(hdev);
4699 }
4700
4701 sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
4702 req->amp_id);
4703
4704 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
4705
4706 if (sk && req->amp_id)
4707 amp_accept_physical(conn, req->amp_id, sk);
4708
4709 return 0;
4710}
4711
4712static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
4713 struct l2cap_cmd_hdr *cmd, u8 *data)
4714{
4715 BT_DBG("conn %p", conn);
4716
4717 return l2cap_connect_rsp(conn, cmd, data);
4718}
4719
/* Handle an incoming L2CAP Move Channel Request: the peer wants to move
 * this channel between BR/EDR and an AMP controller.  Performs policy
 * and collision checks, then either refuses or starts acting as the
 * move responder.  The outcome is always reported to the peer via a
 * Move Channel Response at 'send_move_response'.  Always returns 0.
 *
 * Locking: channel looked up under the chan_list read lock, then the
 * socket is lock_sock()ed; released at the end when a channel was found.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid = 0;
	/* Default verdict unless a specific refusal/acceptance applies */
	u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		goto send_move_response;

	lock_sock(sk);
	pi = l2cap_pi(sk);

	/* Only dynamically-allocated ERTM/streaming channels can move */
	if (pi->scid < L2CAP_CID_DYN_START ||
		(pi->mode != L2CAP_MODE_ERTM &&
		 pi->mode != L2CAP_MODE_STREAMING)) {
		goto send_move_response;
	}

	if (pi->amp_id == req->dest_amp_id) {
		result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
		goto send_move_response;
	}

	if (req->dest_amp_id) {
		/* Validate that the requested AMP controller exists and
		 * is up
		 */
		struct hci_dev *hdev;
		hdev = hci_dev_get(A2MP_HCI_ID(req->dest_amp_id));
		if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
			goto send_move_response;
		}
	}

	/* Collision detection: if a move is already in progress on this
	 * channel, the side with the higher address wins and the other
	 * request is refused.
	 */
	if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
		pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
		bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
		goto send_move_response;
	}

	/* Local policy pins this channel to BR/EDR */
	if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
		result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
		goto send_move_response;
	}

	/* Accept the move: become the responder side */
	pi->amp_move_cmd_ident = cmd->ident;
	pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
	l2cap_amp_move_setup(sk);
	pi->amp_move_id = req->dest_amp_id;
	icid = pi->dcid;

	if (req->dest_amp_id == 0) {
		/* Moving to BR/EDR */
		if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			result = L2CAP_MOVE_CHAN_PENDING;
		} else {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
			result = L2CAP_MOVE_CHAN_SUCCESS;
		}
	} else {
		/* Moving to AMP: physical link setup happens first */
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
		amp_accept_physical(pi->conn, req->dest_amp_id, sk);
		result = L2CAP_MOVE_CHAN_PENDING;
	}

send_move_response:
	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);

	if (sk)
		release_sock(sk);

	return 0;
}
4808
/* Handle an incoming L2CAP Move Channel Response (we are the move
 * initiator).  On success/pending, advance the AMP move state machine —
 * possibly admitting a logical link on the target AMP controller — and
 * send a Move Channel Confirmation when the move outcome is decided.
 * On any failure result, either switch to responder role (collision) or
 * revert the move and confirm "unconfirmed".  Always returns 0.
 *
 * Locking: channel looked up under the chan_list read lock, then
 * lock_sock(); released at the end when a channel was found.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid, result;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	switch (result) {
	case L2CAP_MOVE_CHAN_SUCCESS:
	case L2CAP_MOVE_CHAN_PENDING:
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Channel vanished: tell the peer the move did
			 * not take effect
			 */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);
		if (result == L2CAP_MOVE_CHAN_PENDING)
			l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);

		if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent when logical link
			 * is complete.
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
		} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
				pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			} else {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			}
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
			/* Placeholder flow spec used until real values
			 * are negotiated
			 */
			struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			struct hci_chan *chan;
			/* Moving to AMP */
			if (result == L2CAP_MOVE_CHAN_SUCCESS) {
				/* Remote is ready, send confirm immediately
				 * after logical link is ready
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			} else {
				/* Both logical link and move success
				 * are required to confirm
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
			}
			pi->remote_fs = default_fs;
			pi->local_fs = default_fs;
			chan = l2cap_chan_admit(pi->amp_move_id, pi);
			if (!chan) {
				/* Logical link not available */
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
				break;
			}
			if (chan->state == BT_CONNECTED) {
				/* Logical link is already ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				if (result == L2CAP_MOVE_CHAN_SUCCESS) {
					/* Can confirm now */
					l2cap_send_move_chan_cfm(conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				} else {
					/* Now only need move success
					 * required to confirm
					 */
					pi->amp_move_state =
						L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
				}
			} else
				chan->l2cap_sk = sk;
		} else {
			/* Any other amp move state means the move failed. */
			l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}
		break;
	default:
		/* Failed (including collision case) */
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Could not locate channel, icid is best guess */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);

		if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
				/* Lost the collision: peer's move wins,
				 * we become the responder
				 */
				pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
			else {
				/* Cleanup - cancel move */
				pi->amp_move_id = pi->amp_id;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
				l2cap_amp_move_revert(sk);
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
			}
		} else {
			/* State is STABLE so the confirm response is
			 * ignored.
			 */
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
		}

		l2cap_send_move_chan_cfm(conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		break;
	}

	if (sk)
		release_sock(sk);

	return 0;
}
4964
/* Handle an incoming L2CAP Move Channel Confirmation (we are the move
 * responder).  Commit or revert the pending move accordingly, then
 * always acknowledge with a Move Channel Confirmation Response — even
 * when the channel cannot be found.  Always returns 0.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
	struct sock *sk;
	u16 icid, result;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk) {
		BT_DBG("Bad channel (%d)", (int) icid);
		goto send_move_confirm_response;
	}

	lock_sock(sk);

	if (l2cap_pi(sk)->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
			/* Commit: the move target becomes the current
			 * controller id (0 == BR/EDR)
			 */
			l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;
			if ((!l2cap_pi(sk)->amp_id) &&
					(l2cap_pi(sk)->ampchan)) {
				/* Have moved off of AMP, free the channel */
				hci_chan_put(l2cap_pi(sk)->ampchan);
				/* NOTE(review): ampchan->refcnt is read
				 * after hci_chan_put() drops a reference —
				 * looks like a potential use-after-free if
				 * that was the last reference; confirm
				 * hci_chan_put() semantics.
				 */
				if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
				l2cap_pi(sk)->ampchan = NULL;
				l2cap_pi(sk)->ampcon = NULL;
			}
			l2cap_amp_move_success(sk);
		} else {
			/* Peer did not confirm: roll the move back */
			l2cap_pi(sk)->amp_move_id = l2cap_pi(sk)->amp_id;
			l2cap_amp_move_revert(sk);
		}
		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	} else if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
		BT_DBG("Bad AMP_MOVE_STATE (%d)", l2cap_pi(sk)->amp_move_state);
	}

send_move_confirm_response:
	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	if (sk)
		release_sock(sk);

	return 0;
}
5021
/* Handle an incoming L2CAP Move Channel Confirmation Response — the
 * final PDU of a channel move we confirmed.  Commits the move (freeing
 * the AMP logical channel when we have moved back to BR/EDR) and
 * returns the state machine to STABLE.  Always returns 0.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp =
		(struct l2cap_move_chan_cfm_rsp *) data;
	struct sock *sk;
	u16 icid;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", (int) icid);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		return 0;

	lock_sock(sk);

	l2cap_sock_clear_timer(sk);

	if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		/* Commit: the move target becomes the current controller
		 * id (0 == BR/EDR)
		 */
		l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;

		if (!l2cap_pi(sk)->amp_id) {
			/* Have moved off of AMP, free the channel */
			l2cap_pi(sk)->ampcon = NULL;
			if (l2cap_pi(sk)->ampchan) {
				hci_chan_put(l2cap_pi(sk)->ampchan);
				/* NOTE(review): ampchan->refcnt is read
				 * after hci_chan_put() drops a reference —
				 * possible use-after-free if that was the
				 * last reference; confirm hci_chan_put()
				 * semantics.
				 */
				if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
			}
			l2cap_pi(sk)->ampchan = NULL;
		}

		l2cap_amp_move_success(sk);

		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	}

	release_sock(sk);

	return 0;
}
5071
/* Workqueue handler that dispatches deferred AMP move signalling PDUs
 * (move req/rsp/cfm/cfm-rsp) to their handlers in process context.
 * On handler error, the offending command is rejected with "command not
 * understood".  Owns and frees both the queued skb and the work item.
 */
static void l2cap_amp_signal_worker(struct work_struct *work)
{
	int err = 0;
	struct l2cap_amp_signal_work *ampwork =
		container_of(work, struct l2cap_amp_signal_work, work);

	switch (ampwork->cmd.code) {
	case L2CAP_MOVE_CHAN_REQ:
		err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
						ampwork->data);
		break;

	case L2CAP_MOVE_CHAN_RSP:
		err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
						ampwork->data);
		break;

	case L2CAP_MOVE_CHAN_CFM:
		err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
						ampwork->data);
		break;

	case L2CAP_MOVE_CHAN_CFM_RSP:
		err = l2cap_move_channel_confirm_rsp(ampwork->conn,
						&ampwork->cmd, ampwork->data);
		break;

	default:
		BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
		err = -EINVAL;
		break;
	}

	if (err) {
		struct l2cap_cmd_rej rej;
		BT_DBG("error %d", err);

		/* In this context, commands are only rejected with
		 * "command not understood", code 0.
		 */
		rej.reason = cpu_to_le16(0);
		l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
				L2CAP_COMMAND_REJ, sizeof(rej), &rej);
	}

	/* Release the skb that carried the PDU and the work item itself */
	kfree_skb(ampwork->skb);
	kfree(ampwork);
}
5120
/* Callback invoked when an AMP physical link operation completes.
 *
 * @result:    L2CAP_CREATE_CHAN_SUCCESS / L2CAP_MOVE_CHAN_SUCCESS or a
 *             negative error.
 * @local_id:  local AMP controller id.
 * @remote_id: remote AMP controller id.
 * @sk:        the L2CAP channel socket involved.
 *
 * For channels not yet connected this completes (or fails) channel
 * creation on the AMP; for connected channels it continues a channel
 * move as initiator or responder, or aborts the move on failure.
 * Takes and releases the socket lock.
 */
void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
		struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
		(int) local_id, (int) remote_id, sk);

	lock_sock(sk);

	/* Channel is already being torn down - nothing to do */
	if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
		release_sock(sk);
		return;
	}

	pi = l2cap_pi(sk);

	if (sk->sk_state != BT_CONNECTED) {
		if (bt_sk(sk)->parent) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			/* Response ids are filled from the opposite local
			 * fields (our dcid becomes the peer's scid).
			 */
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			/* Incoming channel on AMP */
			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Send successful response */
				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			} else {
				/* Send negative response */
				rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			}

			l2cap_send_cmd(pi->conn, pi->ident,
					L2CAP_CREATE_CHAN_RSP,
					sizeof(rsp), &rsp);

			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Physical link up: move straight into
				 * configuration.
				 */
				sk->sk_state = BT_CONFIG;
				pi->conf_state |= L2CAP_CONF_REQ_SENT;
				l2cap_send_cmd(pi->conn,
					l2cap_get_ident(pi->conn),
					L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
				l2cap_pi(sk)->num_conf_req++;
			}
		} else {
			/* Outgoing channel on AMP */
			if (result != L2CAP_CREATE_CHAN_SUCCESS) {
				/* Revert to BR/EDR connect */
				l2cap_send_conn_req(sk);
			} else {
				pi->amp_id = local_id;
				l2cap_send_create_chan_req(sk, remote_id);
			}
		}
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		/* Connected channel, we initiated the move: request the
		 * move from the peer and start the move timer.
		 */
		l2cap_amp_move_setup(sk);
		pi->amp_move_id = local_id;
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;

		l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
			pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		struct hci_chan *chan;
		/* All-max default flow spec used for both directions;
		 * field order follows struct l2cap_conf_ext_fs.
		 */
		struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
				0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
		pi->remote_fs = default_fs;
		pi->local_fs = default_fs;
		chan = l2cap_chan_admit(local_id, pi);
		if (chan) {
			if (chan->state == BT_CONNECTED) {
				/* Logical link is ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			} else {
				/* Wait for logical link to be ready */
				chan->l2cap_sk = sk;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			}
		} else {
			/* Logical link not available */
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}
	} else {
		/* Physical link failed, or no move was in progress:
		 * abort the move and resume normal operation.
		 */
		BT_DBG("result %d, role %d, local_busy %d", result,
			(int) pi->amp_move_role,
			(int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));

		if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			if (result == -EINVAL)
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
			else
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}

		pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
		pi->amp_move_state = L2CAP_AMP_STATE_STABLE;

		/* Clear local busy if receive memory is available again */
		if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
			l2cap_ertm_tx(sk, 0, 0,
					L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

		/* Restart data transmission */
		l2cap_ertm_send(sk);
	}

	release_sock(sk);
}
5248
/* Completion handler for AMP logical link setup.
 *
 * @chan:   the HCI logical channel that completed setup.
 * @status: 0 on success, non-zero HCI error otherwise.
 *
 * On success, binds the logical channel to the L2CAP socket and either
 * finishes channel configuration (new AMP channel) or advances the
 * channel-move state machine.  On failure, the channel is disconnected
 * (if not yet connected) or the move is reverted/aborted.
 *
 * NOTE(review): chan is dereferenced (chan->conn, chan->l2cap_sk)
 * before the "(chan != NULL)" test further down; if chan can ever be
 * NULL here this crashes first - confirm against the callers, or the
 * later NULL test is dead code.
 */
int l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
{
	struct l2cap_pinfo *pi;
	struct sock *sk;

	BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);

	sk = chan->l2cap_sk;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	/* Ignore stale completions for channels that are neither
	 * connected nor on an AMP controller.
	 */
	if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
		release_sock(sk);
		return 0;
	}

	pi = l2cap_pi(sk);

	if ((!status) && (chan != NULL)) {
		pi->ampchan = chan;
		pi->ampcon = chan->conn;
		pi->ampcon->l2cap_data = pi->conn;

		if (sk->sk_state != BT_CONNECTED) {
			struct l2cap_conf_rsp rsp;

			/* Must use spinlock to prevent concurrent
			 * execution of l2cap_config_rsp()
			 */
			bh_lock_sock(sk);
			l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(sk, &rsp,
						L2CAP_CONF_SUCCESS, 0), &rsp);
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;

			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
				/* Both sides configured - channel is up */
				set_default_fcs(l2cap_pi(sk));

				sk->sk_state = BT_CONNECTED;

				if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
					l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
					l2cap_ertm_init(sk);

				l2cap_chan_ready(sk);
			}
			bh_unlock_sock(sk);
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent after a success
			 * response is received
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_SUCCESS);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			}
		} else {
			/* Move was not in expected state, free the
			 * logical link
			 */
			hci_chan_put(pi->ampchan);
			pi->ampcon = NULL;
			pi->ampchan = NULL;
		}
	} else {
		/* Logical link setup failed. */

		if (sk->sk_state != BT_CONNECTED)
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			/* Refuse the peer's move request */
			l2cap_amp_move_revert(sk);
			l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONFIG);
		} else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if ((pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
				(pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
				/* Remote has only sent pending or
				 * success responses, clean up
				 */
				l2cap_amp_move_revert(sk);
				l2cap_pi(sk)->amp_move_role =
					L2CAP_AMP_MOVE_NONE;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			}

			/* Other amp move states imply that the move
			 * has already aborted
			 */
			l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}

		pi->ampcon = NULL;
		pi->ampchan = NULL;
	}

	release_sock(sk);
	return 0;
}
5374
5375static void l2cap_logical_link_worker(struct work_struct *work)
5376{
5377 struct l2cap_logical_link_work *log_link_work =
5378 container_of(work, struct l2cap_logical_link_work, work);
5379
5380 l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
5381 kfree(log_link_work);
5382}
5383
5384static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
5385{
5386 struct l2cap_logical_link_work *amp_work;
5387
5388 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5389 if (!amp_work)
5390 return -ENOMEM;
5391
5392 INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
5393 amp_work->chan = chan;
5394 amp_work->status = status;
5395 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5396 kfree(amp_work);
5397 return -ENOMEM;
5398 }
5399
5400 return 0;
5401}
5402
/* HCI callback: a logical link modification completed.
 * Currently only logs the outcome; restoring the previous flow spec
 * on failure is still unimplemented (see TODO).
 */
int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
{
	struct l2cap_conn *conn = chan->conn->l2cap_data;

	BT_DBG("chan %p conn %p status %d", chan, conn, status);

	/* TODO: if failed status restore previous fs */
	return 0;
}
5412
5413int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
5414{
5415 struct l2cap_chan_list *l;
5416 struct l2cap_conn *conn = chan->conn->l2cap_data;
5417 struct sock *sk;
5418
5419 BT_DBG("chan %p conn %p", chan, conn);
5420
5421 if (!conn)
5422 return 0;
5423
5424 l = &conn->chan_list;
5425
5426 read_lock(&l->lock);
5427
5428 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
5429 bh_lock_sock(sk);
5430 /* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
5431 if (l2cap_pi(sk)->ampchan == chan) {
5432 l2cap_pi(sk)->ampchan = NULL;
5433 l2cap_amp_move_init(sk);
5434 }
5435 bh_unlock_sock(sk);
5436 }
5437
5438 read_unlock(&l->lock);
5439
5440 return 0;
5441
5442
5443}
5444
5445static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
5446 u8 *data, struct sk_buff *skb)
5447{
5448 struct l2cap_amp_signal_work *amp_work;
5449
5450 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5451 if (!amp_work)
5452 return -ENOMEM;
5453
5454 INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
5455 amp_work->conn = conn;
5456 amp_work->cmd = *cmd;
5457 amp_work->data = data;
5458 amp_work->skb = skb_clone(skb, GFP_ATOMIC);
5459 if (!amp_work->skb) {
5460 kfree(amp_work);
5461 return -ENOMEM;
5462 }
5463
5464 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5465 kfree_skb(amp_work->skb);
5466 kfree(amp_work);
5467 return -ENOMEM;
5468 }
5469
5470 return 0;
5471}
5472
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005473static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
Claudio Takahaside731152011-02-11 19:28:55 -02005474 u16 to_multiplier)
5475{
5476 u16 max_latency;
5477
5478 if (min > max || min < 6 || max > 3200)
5479 return -EINVAL;
5480
5481 if (to_multiplier < 10 || to_multiplier > 3200)
5482 return -EINVAL;
5483
5484 if (max >= to_multiplier * 8)
5485 return -EINVAL;
5486
5487 max_latency = (to_multiplier * 8 / max) - 1;
5488 if (latency > 499 || latency > max_latency)
5489 return -EINVAL;
5490
5491 return 0;
5492}
5493
/* Handle an LE Connection Parameter Update Request.
 *
 * Only the master may process this request (-EINVAL otherwise);
 * malformed lengths yield -EPROTO.  The parameters are validated with
 * l2cap_check_conn_param(); an accept/reject response is always sent,
 * and on accept the controller is asked to apply the new parameters.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct hci_conn *hcon = conn->hcon;
	struct l2cap_conn_param_update_req *req;
	struct l2cap_conn_param_update_rsp rsp;
	u16 min, max, latency, to_multiplier, cmd_len;
	int err;

	if (!(hcon->link_mode & HCI_LM_MASTER))
		return -EINVAL;

	cmd_len = __le16_to_cpu(cmd->len);
	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
		return -EPROTO;

	req = (struct l2cap_conn_param_update_req *) data;
	min = __le16_to_cpu(req->min);
	max = __le16_to_cpu(req->max);
	latency = __le16_to_cpu(req->latency);
	to_multiplier = __le16_to_cpu(req->to_multiplier);

	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
						min, max, latency, to_multiplier);

	memset(&rsp, 0, sizeof(rsp));

	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
	if (err)
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
	else
		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
							sizeof(rsp), &rsp);

	/* Only update the link after the response has been queued */
	if (!err)
		hci_le_conn_update(hcon, min, max, latency, to_multiplier);

	return 0;
}
5535
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005536static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005537 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
5538 struct sk_buff *skb)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005539{
5540 int err = 0;
5541
5542 switch (cmd->code) {
5543 case L2CAP_COMMAND_REJ:
5544 l2cap_command_rej(conn, cmd, data);
5545 break;
5546
5547 case L2CAP_CONN_REQ:
5548 err = l2cap_connect_req(conn, cmd, data);
5549 break;
5550
5551 case L2CAP_CONN_RSP:
5552 err = l2cap_connect_rsp(conn, cmd, data);
5553 break;
5554
5555 case L2CAP_CONF_REQ:
5556 err = l2cap_config_req(conn, cmd, cmd_len, data);
5557 break;
5558
5559 case L2CAP_CONF_RSP:
5560 err = l2cap_config_rsp(conn, cmd, data);
5561 break;
5562
5563 case L2CAP_DISCONN_REQ:
5564 err = l2cap_disconnect_req(conn, cmd, data);
5565 break;
5566
5567 case L2CAP_DISCONN_RSP:
5568 err = l2cap_disconnect_rsp(conn, cmd, data);
5569 break;
5570
5571 case L2CAP_ECHO_REQ:
5572 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5573 break;
5574
5575 case L2CAP_ECHO_RSP:
5576 break;
5577
5578 case L2CAP_INFO_REQ:
5579 err = l2cap_information_req(conn, cmd, data);
5580 break;
5581
5582 case L2CAP_INFO_RSP:
5583 err = l2cap_information_rsp(conn, cmd, data);
5584 break;
5585
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005586 case L2CAP_CREATE_CHAN_REQ:
5587 err = l2cap_create_channel_req(conn, cmd, data);
5588 break;
5589
5590 case L2CAP_CREATE_CHAN_RSP:
5591 err = l2cap_create_channel_rsp(conn, cmd, data);
5592 break;
5593
5594 case L2CAP_MOVE_CHAN_REQ:
5595 case L2CAP_MOVE_CHAN_RSP:
5596 case L2CAP_MOVE_CHAN_CFM:
5597 case L2CAP_MOVE_CHAN_CFM_RSP:
5598 err = l2cap_sig_amp(conn, cmd, data, skb);
5599 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005600 default:
5601 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5602 err = -EINVAL;
5603 break;
5604 }
5605
5606 return err;
5607}
5608
5609static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5610 struct l2cap_cmd_hdr *cmd, u8 *data)
5611{
5612 switch (cmd->code) {
5613 case L2CAP_COMMAND_REJ:
5614 return 0;
5615
5616 case L2CAP_CONN_PARAM_UPDATE_REQ:
Claudio Takahaside731152011-02-11 19:28:55 -02005617 return l2cap_conn_param_update_req(conn, cmd, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005618
5619 case L2CAP_CONN_PARAM_UPDATE_RSP:
5620 return 0;
5621
5622 default:
5623 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5624 return -EINVAL;
5625 }
5626}
5627
/* Process all signaling commands contained in one signaling-channel
 * PDU.  Each command is dispatched to the LE or BR/EDR handler; a
 * failing handler triggers a command reject.  Parsing stops on the
 * first corrupted command.  Consumes skb.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
						struct sk_buff *skb)
{
	u8 *data = skb->data;
	int len = skb->len;
	struct l2cap_cmd_hdr cmd;
	int err;

	l2cap_raw_recv(conn, skb);

	while (len >= L2CAP_CMD_HDR_SIZE) {
		u16 cmd_len;
		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
		data += L2CAP_CMD_HDR_SIZE;
		len  -= L2CAP_CMD_HDR_SIZE;

		cmd_len = le16_to_cpu(cmd.len);

		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);

		/* Claimed length past the PDU, or ident 0, is invalid */
		if (cmd_len > len || !cmd.ident) {
			BT_DBG("corrupted command");
			break;
		}

		if (conn->hcon->type == LE_LINK)
			err = l2cap_le_sig_cmd(conn, &cmd, data);
		else
			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
							data, skb);

		if (err) {
			struct l2cap_cmd_rej rej;

			/* NOTE(review): err can come from any command
			 * handler, not only a link-type mismatch, so
			 * this message is misleading.
			 */
			BT_ERR("Wrong link type (%d)", err);

			/* FIXME: Map err to a valid reason */
			rej.reason = cpu_to_le16(0);
			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
		}

		data += cmd_len;
		len  -= cmd_len;
	}

	kfree_skb(skb);
}
5675
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005676static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005677{
5678 u16 our_fcs, rcv_fcs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005679 int hdr_size;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005680
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005681 if (pi->extended_control)
5682 hdr_size = L2CAP_EXTENDED_HDR_SIZE;
5683 else
5684 hdr_size = L2CAP_ENHANCED_HDR_SIZE;
5685
5686 if (pi->fcs == L2CAP_FCS_CRC16) {
5687 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005688 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5689 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5690
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005691 if (our_fcs != rcv_fcs) {
5692 BT_DBG("Bad FCS");
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005693 return -EBADMSG;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005694 }
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005695 }
5696 return 0;
5697}
5698
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005699static void l2cap_ertm_pass_to_tx(struct sock *sk,
5700 struct bt_l2cap_control *control)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005701{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005702 BT_DBG("sk %p, control %p", sk, control);
5703 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005704}
5705
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005706static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
5707 struct bt_l2cap_control *control)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005708{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005709 BT_DBG("sk %p, control %p", sk, control);
5710 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
5711}
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005712
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005713static void l2cap_ertm_resend(struct sock *sk)
5714{
5715 struct bt_l2cap_control control;
5716 struct l2cap_pinfo *pi;
5717 struct sk_buff *skb;
5718 struct sk_buff *tx_skb;
5719 u16 seq;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005720
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005721 BT_DBG("sk %p", sk);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005722
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005723 pi = l2cap_pi(sk);
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005724
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005725 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
5726 return;
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03005727
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005728 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
5729 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
5730 return;
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005731
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005732 while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
5733 seq = l2cap_seq_list_pop(&pi->retrans_list);
5734
5735 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
5736 if (!skb) {
5737 BT_DBG("Error: Can't retransmit seq %d, frame missing",
5738 (int) seq);
5739 continue;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005740 }
5741
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005742 bt_cb(skb)->retries += 1;
5743 control = bt_cb(skb)->control;
5744
5745 if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
5746 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
5747 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
5748 l2cap_seq_list_clear(&pi->retrans_list);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005749 break;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005750 }
5751
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005752 control.reqseq = pi->buffer_seq;
5753 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
5754 control.final = 1;
5755 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
5756 } else {
5757 control.final = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005758 }
5759
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005760 if (skb_cloned(skb)) {
5761 /* Cloned sk_buffs are read-only, so we need a
5762 * writeable copy
5763 */
5764 tx_skb = skb_copy(skb, GFP_ATOMIC);
5765 } else {
5766 tx_skb = skb_clone(skb, GFP_ATOMIC);
5767 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005768
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005769 /* Update skb contents */
5770 if (pi->extended_control) {
5771 put_unaligned_le32(__pack_extended_control(&control),
5772 tx_skb->data + L2CAP_HDR_SIZE);
5773 } else {
5774 put_unaligned_le16(__pack_enhanced_control(&control),
5775 tx_skb->data + L2CAP_HDR_SIZE);
5776 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005777
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005778 if (pi->fcs == L2CAP_FCS_CRC16)
5779 apply_fcs(tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005780
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005781 tx_skb->sk = sk;
5782 tx_skb->destructor = l2cap_skb_destructor;
5783 atomic_inc(&pi->ertm_queued);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005784
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005785 l2cap_do_send(sk, tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005786
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005787 BT_DBG("Resent txseq %d", (int)control.txseq);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005788
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005789 pi->last_acked_seq = pi->buffer_seq;
Mat Martineauc0675b82011-07-07 09:39:02 -07005790 }
5791}
5792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005793static inline void l2cap_ertm_retransmit(struct sock *sk,
5794 struct bt_l2cap_control *control)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03005795{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005796 BT_DBG("sk %p, control %p", sk, control);
5797
5798 l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
5799 l2cap_ertm_resend(sk);
5800}
5801
/* Queue every unacked frame starting at control->reqseq for
 * retransmission (REJ handling).
 *
 * If the peer polled us, remember to set the F-bit on the next frame.
 * The retrans_list is rebuilt from scratch: the first queue walk
 * positions skb at the frame with txseq == reqseq (or at sk_send_head
 * if reqseq is not found), then the second walk continues from that
 * position appending every txseq up to, but not including,
 * sk_send_head (frames past it were never sent).  Nothing is resent
 * while the remote side is busy.
 */
static void l2cap_ertm_retransmit_all(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	if (control->poll)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	l2cap_seq_list_clear(&pi->retrans_list);

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return;

	if (pi->unacked_frames) {
		skb_queue_walk(TX_QUEUE(sk), skb) {
			if ((bt_cb(skb)->control.txseq == control->reqseq) ||
				skb == sk->sk_send_head)
				break;
		}

		skb_queue_walk_from(TX_QUEUE(sk), skb) {
			if (skb == sk->sk_send_head)
				break;

			l2cap_seq_list_append(&pi->retrans_list,
						bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(sk);
	}
}
5838
/* Append new_frag to skb's fragment list in O(1).
 *
 * *last_frag caches the current list tail (initially skb itself) so no
 * list walk is needed; it is advanced to new_frag.  The aggregate
 * len/data_len/truesize of the head skb are updated to cover the new
 * fragment.
 */
static inline void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	   skb->data_len reflects only data in fragments
	 */
	BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);

	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5859
/* Process an in-sequence I-frame: reassemble SAR fragments and deliver
 * complete SDUs to the socket receive queue.
 *
 * Ownership: on paths that keep the data, skb is set to NULL so the
 * error cleanup at the bottom does not free it.  On any error the
 * partial SDU and the skb (if still owned) are freed and the error is
 * returned; err stays at its initial -EINVAL on the malformed-SAR
 * paths that just break out of the switch.  Finally, local-busy is
 * signalled to the tx state machine when receive memory fills up.
 */
static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
		struct bt_l2cap_control *control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = -EINVAL;

	BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
		skb, skb->len, skb->truesize);

	if (!control)
		return err;

	pi = l2cap_pi(sk);

	BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
		control->frame_type, control->sar, control->txseq,
		control->reqseq, control->final);

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* A stray reassembly in progress is abandoned */
		if (pi->sdu) {
			BT_DBG("Unexpected unsegmented PDU during reassembly");
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}

		BT_DBG("Unsegmented");
		err = sock_queue_rcv_skb(sk, skb);
		break;

	case L2CAP_SAR_START:
		if (pi->sdu) {
			BT_DBG("Unexpected start PDU during reassembly");
			kfree_skb(pi->sdu);
		}

		/* First two bytes of a START PDU carry the total SDU len */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* START fragment may not already contain the whole SDU;
		 * breaking here leaves err == -EINVAL.
		 */
		if (skb->len >= pi->sdu_len)
			break;

		pi->sdu = skb;
		pi->sdu_last_frag = skb;

		BT_DBG("Start");

		skb = NULL;	/* ownership transferred to pi->sdu */
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* CONTINUE without a START in progress is invalid */
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* Reassembly must still be short of sdu_len here */
		if (pi->sdu->len >= pi->sdu_len)
			break;

		BT_DBG("Continue, reassembled %d", pi->sdu->len);

		err = 0;
		break;

	case L2CAP_SAR_END:
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* END must complete the SDU to exactly sdu_len */
		if (pi->sdu->len != pi->sdu_len)
			break;

		BT_DBG("End, reassembled %d", pi->sdu->len);
		/* If the sender used tiny PDUs, the rcv queuing could fail.
		 * Applications that have issues here should use a larger
		 * sk_rcvbuf.
		 */
		err = sock_queue_rcv_skb(sk, pi->sdu);

		if (!err) {
			/* Reassembly complete */
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}
		break;

	default:
		BT_DBG("Bad SAR value");
		break;
	}

	if (err) {
		BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
			err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;
		if (skb)
			kfree_skb(skb);
	}

	/* Update local busy state */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);

	return err;
}
5984
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005985static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005986{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03005987 int err = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005988 /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
5989 * until a gap is encountered.
5990 */
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03005991
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005992 struct l2cap_pinfo *pi;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03005993
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005994 BT_DBG("sk %p", sk);
5995 pi = l2cap_pi(sk);
5996
5997 while (l2cap_rmem_available(sk)) {
5998 struct sk_buff *skb;
5999 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6000 (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
6001
6002 skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
6003
6004 if (!skb)
6005 break;
6006
6007 skb_unlink(skb, SREJ_QUEUE(sk));
6008 pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
6009 err = l2cap_ertm_rx_expected_iframe(sk,
6010 &bt_cb(skb)->control, skb);
6011 if (err)
6012 break;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006013 }
6014
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006015 if (skb_queue_empty(SREJ_QUEUE(sk))) {
6016 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6017 l2cap_ertm_send_ack(sk);
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03006018 }
6019
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006020 return err;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006021}
6022
/* Handle a received SREJ (Selective Reject) S-frame.
 *
 * The peer is asking for retransmission of the single frame with
 * sequence number control->reqseq.  Validates the request, then either
 * retransmits or clears a previously recorded SREJ action
 * (L2CAP_CONN_SREJ_ACT) when this SREJ acknowledges one we already
 * serviced.  Disconnects on protocol violations.
 */
static void l2cap_ertm_handle_srej(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	/* reqseq == next_tx_seq would reject a frame never sent. */
	if (control->reqseq == pi->next_tx_seq) {
		BT_DBG("Invalid reqseq %d, disconnecting",
			(int) control->reqseq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);

	if (skb == NULL) {
		BT_DBG("Seq %d not available for retransmission",
			(int) control->reqseq);
		return;
	}

	/* max_tx == 0 means unlimited retransmissions. */
	if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
		BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (control->poll) {
		/* P-bit set: our response must carry the F-bit. */
		l2cap_ertm_pass_to_tx(sk, control);

		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_ertm_retransmit(sk, control);
		l2cap_ertm_send(sk);

		if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			pi->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_ertm_pass_to_tx_fbit(sk, control);

		if (control->final) {
			/* Skip the retransmit if this F-bit answers the
			 * SREJ we already acted on for the same reqseq.
			 */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				(pi->srej_save_reqseq == control->reqseq)) {
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			} else {
				l2cap_ertm_retransmit(sk, control);
			}
		} else {
			l2cap_ertm_retransmit(sk, control);
			if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
				pi->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6086
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006087static void l2cap_ertm_handle_rej(struct sock *sk,
6088 struct bt_l2cap_control *control)
Linus Torvalds1da177e2005-04-16 15:20:36 -07006089{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006090 struct l2cap_pinfo *pi;
6091 struct sk_buff *skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006092
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006093 BT_DBG("sk %p, control %p", sk, control);
6094
6095 pi = l2cap_pi(sk);
6096
6097 if (control->reqseq == pi->next_tx_seq) {
6098 BT_DBG("Invalid reqseq %d, disconnecting",
6099 (int) control->reqseq);
6100 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6101 return;
Linus Torvalds1da177e2005-04-16 15:20:36 -07006102 }
6103
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006104 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03006105
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006106 if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
6107 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
6108 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
6109 return;
6110 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07006111
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006112 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
6113
6114 l2cap_ertm_pass_to_tx(sk, control);
6115
6116 if (control->final) {
6117 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
6118 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
6119 else
6120 l2cap_ertm_retransmit_all(sk, control);
6121 } else {
6122 l2cap_ertm_retransmit_all(sk, control);
6123 l2cap_ertm_send(sk);
6124 if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
6125 pi->conn_state |= L2CAP_CONN_REJ_ACT;
6126 }
6127}
6128
/* Classify a received I-frame's txseq relative to the receive state.
 *
 * Compares txseq against last_acked_seq, expected_tx_seq, the receive
 * window (tx_win), and, while in SREJ_SENT state, the outstanding SREJ
 * list and queue.  Returns one of the L2CAP_ERTM_TXSEQ_* codes that the
 * rx state machines use to decide whether to accept, buffer, drop, or
 * disconnect.  All distance comparisons use modular sequence arithmetic
 * via __delta_seq().
 */
static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);
	pi = l2cap_pi(sk);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
		(int)pi->expected_tx_seq);

	if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - txseq outside "
					"tx window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - bad txseq within tx "
					"window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID;
			}
		}

		/* Head of the SREJ list is the frame we asked for next. */
		if (pi->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - txseq not requested "
				"with SREJ");
			return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (pi->expected_tx_seq == txseq) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_ERTM_TXSEQ_EXPECTED;
		}
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) <
		__delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_ERTM_TXSEQ_DUPLICATE;
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets. If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap. This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored. This
		 * causes a disconnect.
		 */

		if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_ERTM_TXSEQ_UNEXPECTED;
	}
}
6218
/* ERTM receive state machine: normal RECV state.
 *
 * Dispatches on the event (I-frame, RR, RNR, REJ, SREJ).  I-frames are
 * classified against the receive window; an expected frame is delivered
 * to reassembly, a sequence gap starts SREJ-based recovery (switching to
 * SREJ_SENT state), duplicates feed only the tx side, and invalid frames
 * either get ignored or drop the connection.
 *
 * skb may be NULL for S-frame events; ownership of a non-NULL skb passes
 * here — it is freed unless consumed (skb_in_use).
 * Returns 0 or a reassembly error.
 */
static int l2cap_ertm_rx_state_recv(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else {
					control->final = 0;
					l2cap_ertm_retransmit_all(sk, control);
					l2cap_ertm_send(sk);
				}
			}

			if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
				l2cap_ertm_send_ack(sk);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding unexpected seq %d",
					control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame. The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			l2cap_seq_list_clear(&pi->srej_list);
			l2cap_ertm_send_srej(sk, control->txseq);

			pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
				pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_PREPARE) {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			l2cap_ertm_send_i_or_rr_or_rnr(sk);
		} else {
			/* Remote just left busy: restart the retransmission
			 * timer if frames are still outstanding.
			 */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames)
				l2cap_ertm_start_retrans_timer(pi);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control && control->poll) {
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_rr_or_rnr(sk, 0);
		}
		l2cap_ertm_stop_retrans_timer(pi);
		l2cap_seq_list_clear(&pi->retrans_list);
		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	default:
		break;
	}

	/* Free any frame the state machine did not take ownership of. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6356
/* ERTM receive state machine: SREJ_SENT state (gap recovery in progress).
 *
 * Incoming I-frames are buffered on SREJ_QUEUE until the missing frames
 * requested via SREJ arrive; l2cap_ertm_rx_queued_iframes() then drains
 * the queue in order and returns the channel to RECV state.  S-frames
 * are handled largely as in RECV state, but acknowledgements answer with
 * the pending SREJ tail where appropriate.
 *
 * Ownership of a non-NULL skb passes here — it is freed unless consumed.
 * Returns 0 or a reassembly error.
 */
static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->expected_tx_seq = __next_seq(txseq, pi);
			break;
		case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
			/* The frame we SREJ'd for has arrived; retire the
			 * list entry and try to drain the queue.
			 */
			l2cap_seq_list_pop(&pi->srej_list);

			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			err = l2cap_ertm_rx_queued_iframes(sk);
			if (err)
				break;

			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing. Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej_list(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame. Drop this copy. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received. Ignore it completely.
			 */
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			/* Answer the poll by re-requesting the newest gap. */
			l2cap_ertm_send_srej_tail(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send_ack(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->poll)
			l2cap_ertm_send_srej_tail(sk);
		else {
			/* Acknowledge the RNR with a plain RR. */
			struct bt_l2cap_control rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.frame_type = 's';
			rr_control.super = L2CAP_SFRAME_RR;
			rr_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &rr_control);
		}

		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	}

	/* Free any frame the state machine did not take ownership of. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6502
/* ERTM receive state machine: AMP_MOVE state (channel move in progress).
 *
 * While the channel is migrating to/from an AMP controller, only
 * in-sequence I-frames are accepted and acknowledgement-type S-frames
 * merely advance the reqseq bookkeeping; everything else is dropped so
 * no receive-state transitions happen mid-move.
 *
 * Ownership of a non-NULL skb passes here — it is freed unless consumed.
 * Returns 0 or a reassembly error.
 */
static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	/* Only handle expected frames, to avoid state changes. */

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
				L2CAP_ERTM_TXSEQ_EXPECTED) {
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else
					control->final = 0;
			}
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
	case L2CAP_ERTM_EVENT_RECV_RNR:
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		/* Ignore */
		break;
	default:
		break;
	}

	/* Free any frame the state machine did not take ownership of. */
	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6564
/* Answer the peer's poll that completes an AMP channel move.
 *
 * Acknowledges frames up to amp_move_reqseq, rewinds the transmit side
 * so unacked frames will be resent from the point the peer expects,
 * finishes the move (resegmentation for the new link), responds with the
 * F-bit set, and finally replays the event that carried the poll through
 * the RECV-state machine.  Returns 0 or a negative error (-EPROTO if the
 * saved event was an I-frame, which cannot be replayed without data).
 */
static int l2cap_answer_move_poll(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;
	int err = 0;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));
	else
		sk->sk_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	pi->next_tx_seq = pi->amp_move_reqseq;
	pi->unacked_frames = 0;

	err = l2cap_finish_amp_move(sk);

	if (err)
		return err;

	pi->conn_state |= L2CAP_CONN_SEND_FBIT;
	l2cap_ertm_send_i_or_rr_or_rnr(sk);

	memset(&control, 0, sizeof(control));
	control.reqseq = pi->amp_move_reqseq;

	if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
		err = -EPROTO;
	else
		err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
						pi->amp_move_event);

	return err;
}
6607
6608static void l2cap_amp_move_setup(struct sock *sk)
6609{
6610 struct l2cap_pinfo *pi;
6611 struct sk_buff *skb;
6612
6613 BT_DBG("sk %p", sk);
6614
6615 pi = l2cap_pi(sk);
6616
6617 l2cap_ertm_stop_ack_timer(pi);
6618 l2cap_ertm_stop_retrans_timer(pi);
6619 l2cap_ertm_stop_monitor_timer(pi);
6620
6621 pi->retry_count = 0;
6622 skb_queue_walk(TX_QUEUE(sk), skb) {
6623 if (bt_cb(skb)->retries)
6624 bt_cb(skb)->retries = 1;
6625 else
6626 break;
6627 }
6628
6629 pi->expected_tx_seq = pi->buffer_seq;
6630
6631 pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
6632 l2cap_seq_list_clear(&pi->retrans_list);
6633 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
6634 skb_queue_purge(SREJ_QUEUE(sk));
6635
6636 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
6637 pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
6638
6639 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
6640 pi->rx_state);
6641
6642 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
6643}
6644
6645static void l2cap_amp_move_revert(struct sock *sk)
6646{
6647 struct l2cap_pinfo *pi;
6648
6649 BT_DBG("sk %p", sk);
6650
6651 pi = l2cap_pi(sk);
6652
6653 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6654 l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6655 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6656 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
6657 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
6658}
6659
6660static int l2cap_amp_move_reconf(struct sock *sk)
6661{
6662 struct l2cap_pinfo *pi;
6663 u8 buf[64];
6664 int err = 0;
6665
6666 BT_DBG("sk %p", sk);
6667
6668 pi = l2cap_pi(sk);
6669
6670 l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
6671 l2cap_build_amp_reconf_req(sk, buf), buf);
6672 return err;
6673}
6674
/* Complete a successful AMP channel move.
 *
 * The initiator of the move either starts a reconfiguration exchange
 * (when enable_reconfig is set) or polls the peer and waits for the
 * F-bit; the responder waits for the peer's poll (possibly combined with
 * reconfiguration).  Non-ERTM channels return straight to normal
 * reception.
 */
static void l2cap_amp_move_success(struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		int err = 0;
		/* Send reconfigure request */
		if (pi->mode == L2CAP_MODE_ERTM) {
			pi->reconf_state = L2CAP_RECONF_INT;
			if (enable_reconfig)
				err = l2cap_amp_move_reconf(sk);

			/* Reconfig unavailable or failed: fall back to an
			 * explicit poll / wait-for-F handshake.
			 */
			if (err || !enable_reconfig) {
				pi->reconf_state = L2CAP_RECONF_NONE;
				l2cap_ertm_tx(sk, NULL, NULL,
					L2CAP_ERTM_EVENT_EXPLICIT_POLL);
				pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
			}
		} else
			pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	} else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		if (pi->mode == L2CAP_MODE_ERTM)
			pi->rx_state =
				L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
		else
			pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
	}
}
6707
6708static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
6709{
6710 /* Make sure reqseq is for a packet that has been sent but not acked */
6711 u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
6712 return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
6713}
6714
/* Receive an I-frame on a streaming-mode channel.
 *
 * Streaming mode has no retransmission: an in-sequence frame is passed
 * to reassembly; anything else discards both the frame and any partial
 * SDU being reassembled.  Sequence state is unconditionally resynced to
 * the received txseq so the next frame is judged against it.
 * Always returns 0.
 */
static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, state %d",
		sk, control, skb, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
			L2CAP_ERTM_TXSEQ_EXPECTED) {
		l2cap_ertm_pass_to_tx(sk, control);

		BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
			__next_seq(pi->buffer_seq, pi));

		pi->buffer_seq = __next_seq(pi->buffer_seq, pi);

		l2cap_ertm_rx_expected_iframe(sk, control, skb);
	} else {
		/* Lost frame(s): drop any partially reassembled SDU. */
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	/* Resynchronize to the frame just seen, in or out of sequence. */
	pi->last_acked_seq = control->txseq;
	pi->expected_tx_seq = __next_seq(control->txseq, pi);

	return err;
}
6755
/* Top-level ERTM receive dispatch.
 *
 * Validates the frame's reqseq (must acknowledge only sent, unacked
 * frames — otherwise the channel is disconnected) and routes the event
 * to the handler for the current rx_state.  The WAIT_F/WAIT_P states
 * complete AMP channel moves before resuming normal reception.
 *
 * skb may be NULL for S-frame events; ownership of a non-NULL skb passes
 * to the state handlers.  Returns 0 or a negative error.
 */
static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
		sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (__valid_reqseq(pi, control->reqseq)) {
		switch (pi->rx_state) {
		case L2CAP_ERTM_RX_STATE_RECV:
			err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
			break;
		case L2CAP_ERTM_RX_STATE_SREJ_SENT:
			err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_AMP_MOVE:
			err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
			/* AMP move initiator waiting for the F-bit reply. */
			if (control->final) {
				pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;

				pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
				l2cap_ertm_process_reqseq(sk, control->reqseq);

				if (!skb_queue_empty(TX_QUEUE(sk)))
					sk->sk_send_head =
						skb_peek(TX_QUEUE(sk));
				else
					sk->sk_send_head = NULL;

				/* Rewind next_tx_seq to the point expected
				 * by the receiver.
				 */
				pi->next_tx_seq = control->reqseq;
				pi->unacked_frames = 0;

				/* Adopt the MTU of whichever controller now
				 * carries the channel.
				 */
				if (pi->ampcon)
					pi->conn->mtu =
						pi->ampcon->hdev->acl_mtu;
				else
					pi->conn->mtu =
						pi->conn->hcon->hdev->acl_mtu;

				err = l2cap_setup_resegment(sk);

				if (err)
					break;

				err = l2cap_ertm_rx_state_recv(sk, control, skb,
								event);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
			/* AMP move responder waiting for the peer's poll. */
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;
				err = l2cap_answer_move_poll(sk);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;

				BT_DBG("amp_move_role 0x%2.2x, "
					"reconf_state 0x%2.2x",
					pi->amp_move_role, pi->reconf_state);

				if (pi->reconf_state == L2CAP_RECONF_ACC)
					err = l2cap_amp_move_reconf(sk);
				else
					err = l2cap_answer_move_poll(sk);
			}
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
			control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	}

	return err;
}
6850
6851void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
6852{
6853 lock_sock(sk);
6854
6855 l2cap_pi(sk)->fixed_channel = 1;
6856
6857 l2cap_pi(sk)->imtu = opt->imtu;
6858 l2cap_pi(sk)->omtu = opt->omtu;
6859 l2cap_pi(sk)->remote_mps = opt->omtu;
6860 l2cap_pi(sk)->mps = opt->omtu;
6861 l2cap_pi(sk)->flush_to = opt->flush_to;
6862 l2cap_pi(sk)->mode = opt->mode;
6863 l2cap_pi(sk)->fcs = opt->fcs;
6864 l2cap_pi(sk)->max_tx = opt->max_tx;
6865 l2cap_pi(sk)->remote_max_tx = opt->max_tx;
6866 l2cap_pi(sk)->tx_win = opt->txwin_size;
6867 l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
6868 l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
6869 l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
6870
6871 if (opt->mode == L2CAP_MODE_ERTM ||
6872 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
6873 l2cap_ertm_init(sk);
6874
6875 release_sock(sk);
6876
6877 return;
6878}
6879
/* Map an S-frame supervisory function code (control->super) to the
 * corresponding ERTM receive event.  Index order follows the L2CAP
 * supervisory codes: RR, REJ, RNR, SREJ.
 */
static const u8 l2cap_ertm_rx_func_to_event[4] = {
	L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
	L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
};
6884
/* Entry point for data frames on a connection-oriented channel.
 *
 * Basic mode frames are queued directly to the socket (dropped on MTU or
 * buffer overflow — basic mode has no flow control).  ERTM and streaming
 * frames have their control field parsed (extended or enhanced format),
 * FCS verified, and length validated against MPS before being dispatched
 * to l2cap_ertm_rx() / l2cap_strm_rx().  Protocol violations trigger a
 * disconnect; invalid frames are dropped.  Consumes skb in all paths.
 * Always returns 0.
 */
int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control *control;
	u16 len;
	u8 event;
	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		control = &bt_cb(skb)->control;
		/* Parse and strip the 4-byte extended or 2-byte enhanced
		 * control field.
		 */
		if (pi->extended_control) {
			__get_extended_control(get_unaligned_le32(skb->data),
						control);
			skb_pull(skb, 4);
		} else {
			__get_enhanced_control(get_unaligned_le16(skb->data),
						control);
			skb_pull(skb, 2);
		}

		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Payload length excludes the SDU-length field of a
		 * start-of-SDU I-frame and the trailing FCS.
		 */
		if ((control->frame_type == 'i') &&
			(control->sar == L2CAP_SAR_START))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask for retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (control->frame_type == 'i') {

			int err;

			BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
				control->sar, control->reqseq, control->final,
				control->txseq);

			/* Validate F-bit - F=0 always valid, F=1 only
			 * valid in TX WAIT_F
			 */
			if (control->final && (pi->tx_state !=
					L2CAP_ERTM_TX_STATE_WAIT_F))
				goto drop;

			if (pi->mode != L2CAP_MODE_STREAMING) {
				event = L2CAP_ERTM_EVENT_RECV_IFRAME;
				err = l2cap_ertm_rx(sk, control, skb, event);
			} else
				err = l2cap_strm_rx(sk, control, skb);
			if (err)
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		} else {
			/* Only I-frames are expected in streaming mode */
			if (pi->mode == L2CAP_MODE_STREAMING)
				goto drop;

			BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
				control->reqseq, control->final, control->poll,
				control->super);

			/* S-frames carry no payload. */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
				goto drop;
			}

			/* Validate F and P bits */
			if (control->final &&
				((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
					|| control->poll))
				goto drop;

			event = l2cap_ertm_rx_func_to_event[control->super];
			if (l2cap_ertm_rx(sk, control, skb, event))
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		}

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	return 0;
}
7010
/* Process a deferred data frame with the socket lock held.
 * Thin wrapper around l2cap_data_channel() for contexts where the frame
 * could not be handled at original receive time.
 */
void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	l2cap_data_channel(sk, skb);
	release_sock(sk);
}
7017
Al Viro8e036fc2007-07-29 00:16:36 -07007018static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007019{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007020 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007021
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007022 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
7023 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007024 goto drop;
7025
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00007026 bh_lock_sock(sk);
7027
Linus Torvalds1da177e2005-04-16 15:20:36 -07007028 BT_DBG("sk %p, len %d", sk, skb->len);
7029
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007030 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007031 goto drop;
7032
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007033 if (l2cap_pi(sk)->imtu < skb->len)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007034 goto drop;
7035
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007036 if (!sock_queue_rcv_skb(sk, skb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07007037 goto done;
7038
7039drop:
7040 kfree_skb(skb);
7041
7042done:
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03007043 if (sk)
7044 bh_unlock_sock(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007045 return 0;
7046}
7047
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007048static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
7049{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007050 struct sock *sk;
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007051
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007052 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
7053 if (!sk)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007054 goto drop;
7055
7056 bh_lock_sock(sk);
7057
7058 BT_DBG("sk %p, len %d", sk, skb->len);
7059
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007060 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007061 goto drop;
7062
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007063 if (l2cap_pi(sk)->imtu < skb->len)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007064 goto drop;
7065
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007066 if (!sock_queue_rcv_skb(sk, skb))
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007067 goto done;
7068
7069drop:
7070 kfree_skb(skb);
7071
7072done:
7073 if (sk)
7074 bh_unlock_sock(sk);
7075 return 0;
7076}
7077
Linus Torvalds1da177e2005-04-16 15:20:36 -07007078static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
7079{
7080 struct l2cap_hdr *lh = (void *) skb->data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007081 struct sock *sk;
Al Viro8e036fc2007-07-29 00:16:36 -07007082 u16 cid, len;
7083 __le16 psm;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007084
7085 skb_pull(skb, L2CAP_HDR_SIZE);
7086 cid = __le16_to_cpu(lh->cid);
7087 len = __le16_to_cpu(lh->len);
7088
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03007089 if (len != skb->len) {
7090 kfree_skb(skb);
7091 return;
7092 }
7093
Linus Torvalds1da177e2005-04-16 15:20:36 -07007094 BT_DBG("len %d, cid 0x%4.4x", len, cid);
7095
7096 switch (cid) {
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02007097 case L2CAP_CID_LE_SIGNALING:
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03007098 case L2CAP_CID_SIGNALING:
Linus Torvalds1da177e2005-04-16 15:20:36 -07007099 l2cap_sig_channel(conn, skb);
7100 break;
7101
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -03007102 case L2CAP_CID_CONN_LESS:
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03007103 psm = get_unaligned_le16(skb->data);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007104 skb_pull(skb, 2);
7105 l2cap_conless_channel(conn, psm, skb);
7106 break;
7107
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007108 case L2CAP_CID_LE_DATA:
7109 l2cap_att_channel(conn, cid, skb);
7110 break;
7111
Anderson Brigliaea370122011-06-07 18:46:31 -03007112 case L2CAP_CID_SMP:
7113 if (smp_sig_channel(conn, skb))
7114 l2cap_conn_del(conn->hcon, EACCES);
7115 break;
7116
Linus Torvalds1da177e2005-04-16 15:20:36 -07007117 default:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007118 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
7119 if (sk) {
7120 if (sock_owned_by_user(sk)) {
7121 BT_DBG("backlog sk %p", sk);
7122 if (sk_add_backlog(sk, skb))
7123 kfree_skb(skb);
7124 } else
7125 l2cap_data_channel(sk, skb);
7126
7127 bh_unlock_sock(sk);
7128 } else if (cid == L2CAP_CID_A2MP) {
7129 BT_DBG("A2MP");
7130 amp_conn_ind(conn, skb);
7131 } else {
7132 BT_DBG("unknown cid 0x%4.4x", cid);
7133 kfree_skb(skb);
7134 }
7135
Linus Torvalds1da177e2005-04-16 15:20:36 -07007136 break;
7137 }
7138}
7139
7140/* ---- L2CAP interface with lower layer (HCI) ---- */
7141
7142static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
7143{
7144 int exact = 0, lm1 = 0, lm2 = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007145 register struct sock *sk;
7146 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007147
7148 if (type != ACL_LINK)
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007149 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007150
7151 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
7152
7153 /* Find listening sockets and check their link_mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007154 read_lock(&l2cap_sk_list.lock);
7155 sk_for_each(sk, node, &l2cap_sk_list.head) {
7156 if (sk->sk_state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007157 continue;
7158
7159 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007160 lm1 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007161 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007162 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007163 exact++;
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007164 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
7165 lm2 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007166 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007167 lm2 |= HCI_LM_MASTER;
7168 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007169 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007170 read_unlock(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007171
7172 return exact ? lm1 : lm2;
7173}
7174
7175static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7176{
Marcel Holtmann01394182006-07-03 10:02:46 +02007177 struct l2cap_conn *conn;
7178
Linus Torvalds1da177e2005-04-16 15:20:36 -07007179 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
7180
Ville Tervoacd7d372011-02-10 22:38:49 -03007181 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007182 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007183
7184 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007185 conn = l2cap_conn_add(hcon, status);
7186 if (conn)
7187 l2cap_conn_ready(conn);
Marcel Holtmann01394182006-07-03 10:02:46 +02007188 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007189 l2cap_conn_del(hcon, bt_err(status));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007190
7191 return 0;
7192}
7193
Marcel Holtmann2950f212009-02-12 14:02:50 +01007194static int l2cap_disconn_ind(struct hci_conn *hcon)
7195{
7196 struct l2cap_conn *conn = hcon->l2cap_data;
7197
7198 BT_DBG("hcon %p", hcon);
7199
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007200 if (hcon->type != ACL_LINK || !conn)
Marcel Holtmann2950f212009-02-12 14:02:50 +01007201 return 0x13;
7202
7203 return conn->disc_reason;
7204}
7205
7206static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007207{
7208 BT_DBG("hcon %p reason %d", hcon, reason);
7209
Ville Tervoacd7d372011-02-10 22:38:49 -03007210 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007211 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007212
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007213 l2cap_conn_del(hcon, bt_err(reason));
Marcel Holtmann01394182006-07-03 10:02:46 +02007214
Linus Torvalds1da177e2005-04-16 15:20:36 -07007215 return 0;
7216}
7217
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007218static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007219{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007220 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007221 return;
7222
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007223 if (encrypt == 0x00) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007224 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
7225 l2cap_sock_clear_timer(sk);
7226 l2cap_sock_set_timer(sk, HZ * 5);
7227 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
7228 __l2cap_sock_close(sk, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007229 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007230 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
7231 l2cap_sock_clear_timer(sk);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007232 }
7233}
7234
/* HCI callback: authentication/encryption state changed for @hcon.
 *
 * Walks every channel on the connection and advances its state
 * machine according to @status (0 = security procedure succeeded)
 * and @encrypt (link encryption on/off). Each socket is bh-locked
 * only for its own iteration. Always returns 0.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	/* No L2CAP state on this link: nothing to update. */
	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);

		/* LE data channels: success + encryption completes SMP
		 * pairing -- adopt the link security level, stop the
		 * security timer, mark the channel ready and kick off
		 * key distribution. Nothing to do otherwise. */
		if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt) {
				l2cap_pi(sk)->sec_level = hcon->sec_level;
				del_timer(&conn->security_timer);
				l2cap_chan_ready(sk);
				smp_distribute_keys(conn, 0);
			}

			bh_unlock_sock(sk);
			continue;
		}

		/* A connect request is already pending for this channel;
		 * leave it to the in-flight procedure. */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need the encryption-change
		 * policy check (grace timer / forced close). */
		if (!status && (sk->sk_state == BT_CONNECTED ||
					sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing connect was waiting on security. */
			if (!status) {
				l2cap_pi(sk)->conf_state |=
						L2CAP_CONF_CONNECT_PEND;
				/* Prefer an AMP physical link when the
				 * channel policy asks for it. */
				if (l2cap_pi(sk)->amp_pref ==
						BT_AMP_POLICY_PREFER_AMP) {
					amp_create_physical(l2cap_pi(sk)->conn,
							sk);
				} else
					l2cap_send_conn_req(sk);
			} else {
				/* Security failed: short timer so the
				 * channel gets cleaned up soon. */
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming connect was deferred pending security:
			 * answer the remote's CONN_REQ now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				if (l2cap_pi(sk)->amp_id) {
					/* Channel is destined for an AMP
					 * controller; the response is sent
					 * from the AMP accept path. */
					amp_accept_physical(conn,
						l2cap_pi(sk)->amp_id, sk);
					bh_unlock_sock(sk);
					continue;
				}

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
7328
/* HCI callback: one ACL fragment arrived for @hcon.
 *
 * Reassembles L2CAP frames that span multiple ACL packets using
 * conn->rx_skb / conn->rx_len as the in-progress buffer, and passes
 * each completed frame to l2cap_recv_frame(). The incoming @skb is
 * always consumed (its data is copied into the reassembly buffer).
 * Always returns 0.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Only auto-create L2CAP state for BR/EDR controllers here. */
	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		struct sock *sk;
		u16 cid;
		int len;

		/* A new start frame while reassembly is in progress means
		 * the previous frame was truncated: discard it and mark
		 * the connection unreliable. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		/* Total on-air frame size: header + advertised payload. */
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		/* ACL_CONT set on a start-sized check: the controller
		 * claimed a complete frame but the lengths disagree. */
		if (flags & ACL_CONT) {
			BT_ERR("Complete frame is incomplete "
					"(len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Pre-check the destination channel's MTU before buffering.
		 * NOTE(review): l2cap_get_chan_by_scid appears to return the
		 * sock bh-locked (hence the bh_unlock_sock calls below). */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);

		if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
			BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
					len, l2cap_pi(sk)->imtu);
			bh_unlock_sock(sk);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (sk)
			bh_unlock_sock(sk);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress. */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
				skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	/* Reached on errors AND after successful buffering: the fragment's
	 * payload has been copied, so the original skb is always freed. */
	kfree_skb(skb);
	return 0;
}
7447
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007448static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007449{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007450 struct sock *sk;
7451 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007452
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007453 read_lock_bh(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007454
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007455 sk_for_each(sk, node, &l2cap_sk_list.head) {
7456 struct l2cap_pinfo *pi = l2cap_pi(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007457
Gustavo F. Padovan903d3432011-02-10 14:16:06 -02007458 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007459 batostr(&bt_sk(sk)->src),
7460 batostr(&bt_sk(sk)->dst),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007461 sk->sk_state, __le16_to_cpu(pi->psm),
7462 pi->scid, pi->dcid,
7463 pi->imtu, pi->omtu, pi->sec_level,
7464 pi->mode);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007465 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007466
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007467 read_unlock_bh(&l2cap_sk_list.lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007468
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007469 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007470}
7471
/* debugfs open hook: bind the file to the single-shot seq_file show. */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7476
/* File operations for the "l2cap" debugfs entry (read-only seq_file). */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
7483
/* Dentry of the "l2cap" debugfs file; NULL when not created. */
static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007485
/* L2CAP's registration with the HCI core: callbacks invoked by HCI for
 * connection setup/teardown, security events and incoming ACL data. */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata,
	.create_cfm	= l2cap_create_cfm,
	.modify_cfm	= l2cap_modify_cfm,
	.destroy_cfm	= l2cap_destroy_cfm,
};
7499
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007500int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007501{
7502 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007503
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007504 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007505 if (err < 0)
7506 return err;
7507
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007508 _l2cap_wq = create_singlethread_workqueue("l2cap");
7509 if (!_l2cap_wq) {
7510 err = -ENOMEM;
7511 goto error;
7512 }
7513
Linus Torvalds1da177e2005-04-16 15:20:36 -07007514 err = hci_register_proto(&l2cap_hci_proto);
7515 if (err < 0) {
7516 BT_ERR("L2CAP protocol registration failed");
7517 bt_sock_unregister(BTPROTO_L2CAP);
7518 goto error;
7519 }
7520
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007521 if (bt_debugfs) {
7522 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
7523 bt_debugfs, NULL, &l2cap_debugfs_fops);
7524 if (!l2cap_debugfs)
7525 BT_ERR("Failed to create L2CAP debug file");
7526 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007527
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007528 if (amp_init() < 0) {
7529 BT_ERR("AMP Manager initialization failed");
7530 goto error;
7531 }
7532
Linus Torvalds1da177e2005-04-16 15:20:36 -07007533 return 0;
7534
7535error:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007536 destroy_workqueue(_l2cap_wq);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007537 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007538 return err;
7539}
7540
/* Tear down the L2CAP core; reverse order of l2cap_init(). */
void l2cap_exit(void)
{
	amp_exit();

	debugfs_remove(l2cap_debugfs);

	/* Drain any pending deferred work before killing the workqueue. */
	flush_workqueue(_l2cap_wq);
	destroy_workqueue(_l2cap_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
7555
/* Runtime-tunable (0644) switch to turn off enhanced retransmission mode. */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

/* Runtime-tunable (0644) switch for channel reconfig after an AMP move. */
module_param(enable_reconfig, bool, 0644);
MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");