blob: d103bef6b3ad0396b8b9d24c98b7a50587f5a3f5 [file] [log] [blame]
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +09001/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07002 BlueZ - Bluetooth protocol stack for Linux
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003 Copyright (c) 2000-2001, 2010-2011 Code Aurora Forum. All rights reserved.
Gustavo F. Padovance5706b2010-07-13 11:57:11 -03004 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
Gustavo F. Padovan5d8868f2010-07-16 16:18:39 -03005 Copyright (C) 2010 Google Inc.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
12
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090017 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
Linus Torvalds1da177e2005-04-16 15:20:36 -070020 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21
YOSHIFUJI Hideaki8e87d142007-02-09 23:24:33 +090022 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
Linus Torvalds1da177e2005-04-16 15:20:36 -070024 SOFTWARE IS DISCLAIMED.
25*/
26
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020027/* Bluetooth L2CAP core. */
Linus Torvalds1da177e2005-04-16 15:20:36 -070028
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <linux/module.h>
30
31#include <linux/types.h>
Randy Dunlap4fc268d2006-01-11 12:17:47 -080032#include <linux/capability.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070033#include <linux/errno.h>
34#include <linux/kernel.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <linux/sched.h>
36#include <linux/slab.h>
37#include <linux/poll.h>
38#include <linux/fcntl.h>
39#include <linux/init.h>
40#include <linux/interrupt.h>
41#include <linux/socket.h>
42#include <linux/skbuff.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070043#include <linux/list.h>
Marcel Holtmannbe9d1222005-11-08 09:57:38 -080044#include <linux/device.h>
Marcel Holtmannaef7d972010-03-21 05:27:45 +010045#include <linux/debugfs.h>
46#include <linux/seq_file.h>
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -030047#include <linux/uaccess.h>
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -030048#include <linux/crc16.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070049#include <linux/math64.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050#include <net/sock.h>
51
52#include <asm/system.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070053#include <asm/unaligned.h>
54
55#include <net/bluetooth/bluetooth.h>
56#include <net/bluetooth/hci_core.h>
57#include <net/bluetooth/l2cap.h>
Anderson Brigliaea370122011-06-07 18:46:31 -030058#include <net/bluetooth/smp.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070059#include <net/bluetooth/amp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070060
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -020061int disable_ertm;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070062int enable_reconfig;
Marcel Holtmannf0709e02007-10-20 13:38:51 +020063
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -070064static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070065static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP | L2CAP_FC_A2MP, };
Linus Torvalds1da177e2005-04-16 15:20:36 -070066
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070067struct workqueue_struct *_l2cap_wq;
68
69struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71};
72
73static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
74 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id);
75static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
76 struct l2cap_pinfo *pi, u16 icid, u16 result);
77static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
78 u16 icid, u16 result);
79
80static void l2cap_amp_move_setup(struct sock *sk);
81static void l2cap_amp_move_success(struct sock *sk);
82static void l2cap_amp_move_revert(struct sock *sk);
83
84static int l2cap_ertm_rx_queued_iframes(struct sock *sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -070085
Linus Torvalds1da177e2005-04-16 15:20:36 -070086static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
87 u8 code, u8 ident, u16 dlen, void *data);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070088static int l2cap_answer_move_poll(struct sock *sk);
89static int l2cap_create_cfm(struct hci_chan *chan, u8 status);
90static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi);
91static void l2cap_chan_ready(struct sock *sk);
92static void l2cap_conn_del(struct hci_conn *hcon, int err);
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -030093
Marcel Holtmann01394182006-07-03 10:02:46 +020094/* ---- L2CAP channels ---- */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070095static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
Gustavo F. Padovan4a6aa522011-05-17 14:34:52 -030096{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070097 struct sock *s;
98 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
99 if (l2cap_pi(s)->dcid == cid)
100 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200101 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700102 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200103}
104
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700105/* Find channel with given DCID.
106 * Returns locked socket */
107static inline struct sock *l2cap_get_chan_by_dcid(struct l2cap_chan_list *l,
108 u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200109{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700110 struct sock *s;
111 read_lock(&l->lock);
112 s = __l2cap_get_chan_by_dcid(l, cid);
113 if (s)
114 bh_lock_sock(s);
115 read_unlock(&l->lock);
116 return s;
117}
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300118
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700119static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
120{
121 struct sock *s;
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->scid == cid)
124 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200125 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200127}
128
129/* Find channel with given SCID.
130 * Returns locked socket */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700131static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
Marcel Holtmann01394182006-07-03 10:02:46 +0200132{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700133 struct sock *s;
134 read_lock(&l->lock);
135 s = __l2cap_get_chan_by_scid(l, cid);
136 if (s)
137 bh_lock_sock(s);
138 read_unlock(&l->lock);
139 return s;
Marcel Holtmann01394182006-07-03 10:02:46 +0200140}
141
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
Marcel Holtmann01394182006-07-03 10:02:46 +0200143{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700144 struct sock *s;
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->ident == ident)
147 break;
Marcel Holtmann01394182006-07-03 10:02:46 +0200148 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700149 return s;
150}
151
152static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
153{
154 struct sock *s;
155 read_lock(&l->lock);
156 s = __l2cap_get_chan_by_ident(l, ident);
157 if (s)
158 bh_lock_sock(s);
159 read_unlock(&l->lock);
160 return s;
161}
162
163static inline struct sk_buff *l2cap_ertm_seq_in_queue(struct sk_buff_head *head,
164 u16 seq)
165{
166 struct sk_buff *skb;
167
168 skb_queue_walk(head, skb) {
169 if (bt_cb(skb)->control.txseq == seq)
170 return skb;
171 }
172
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300173 return NULL;
Marcel Holtmann01394182006-07-03 10:02:46 +0200174}
175
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700176static int l2cap_seq_list_init(struct l2cap_seq_list *seq_list, u16 size)
Marcel Holtmann01394182006-07-03 10:02:46 +0200177{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700178 u16 allocSize = 1;
179 int err = 0;
180 int i;
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300181
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700182 /* Actual allocated size must be a power of 2 */
183 while (allocSize && allocSize <= size)
184 allocSize <<= 1;
185 if (!allocSize)
186 return -ENOMEM;
Marcel Holtmann01394182006-07-03 10:02:46 +0200187
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700188 seq_list->list = kzalloc(sizeof(u16) * allocSize, GFP_ATOMIC);
189 if (!seq_list->list)
190 return -ENOMEM;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300191
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700192 seq_list->size = allocSize;
193 seq_list->mask = allocSize - 1;
194 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
195 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
196 for (i = 0; i < allocSize; i++)
197 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300198
Gustavo F. Padovan73b2ec12011-04-18 19:36:44 -0300199 return err;
Gustavo F. Padovan9e4425f2011-04-18 18:38:43 -0300200}
201
/* Release the backing array of a sequence list.  Safe to call on a
 * list whose init failed or never ran (kfree(NULL) is a no-op).
 */
static inline void l2cap_seq_list_free(struct l2cap_seq_list *seq_list)
{
	kfree(seq_list->list);
}
206
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700207static inline bool l2cap_seq_list_contains(struct l2cap_seq_list *seq_list,
208 u16 seq)
209{
210 return seq_list->list[seq & seq_list->mask] != L2CAP_SEQ_LIST_CLEAR;
211}
212
/* Remove sequence number 'seq' from the list and return it, or return
 * L2CAP_SEQ_LIST_CLEAR when the list is empty or 'seq' is not queued.
 *
 * The list is a singly linked list stored inside the array: each slot
 * holds the next sequence number, L2CAP_SEQ_LIST_TAIL marks the last
 * element, and L2CAP_SEQ_LIST_CLEAR marks a free slot.
 */
static u16 l2cap_seq_list_remove(struct l2cap_seq_list *seq_list, u16 seq)
{
	u16 mask = seq_list->mask;

	BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);

	if (seq_list->head == L2CAP_SEQ_LIST_CLEAR) {
		/* In case someone tries to pop the head of an empty list */
		BT_DBG("List empty");
		return L2CAP_SEQ_LIST_CLEAR;
	} else if (seq_list->head == seq) {
		/* Head can be removed quickly */
		BT_DBG("Remove head");
		seq_list->head = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;

		/* List became empty: reset tail as well */
		if (seq_list->head == L2CAP_SEQ_LIST_TAIL) {
			seq_list->head = L2CAP_SEQ_LIST_CLEAR;
			seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
		}
	} else {
		/* Non-head item must be found first */
		u16 prev = seq_list->head;
		BT_DBG("Find and remove");
		/* Walk the chain until 'prev' is the predecessor of 'seq' */
		while (seq_list->list[prev & mask] != seq) {
			prev = seq_list->list[prev & mask];
			if (prev == L2CAP_SEQ_LIST_TAIL) {
				BT_DBG("seq %d not in list", (int) seq);
				return L2CAP_SEQ_LIST_CLEAR;
			}
		}

		/* Splice 'seq' out and clear its slot */
		seq_list->list[prev & mask] = seq_list->list[seq & mask];
		seq_list->list[seq & mask] = L2CAP_SEQ_LIST_CLEAR;
		if (seq_list->tail == seq)
			seq_list->tail = prev;
	}
	return seq;
}
252
/* Dequeue and return the oldest sequence number in the list, or
 * L2CAP_SEQ_LIST_CLEAR if the list is empty.
 */
static inline u16 l2cap_seq_list_pop(struct l2cap_seq_list *seq_list)
{
	return l2cap_seq_list_remove(seq_list, seq_list->head);
}
257
258static void l2cap_seq_list_clear(struct l2cap_seq_list *seq_list)
259{
260 if (seq_list->head != L2CAP_SEQ_LIST_CLEAR) {
261 u16 i;
262 for (i = 0; i < seq_list->size; i++)
263 seq_list->list[i] = L2CAP_SEQ_LIST_CLEAR;
264
265 seq_list->head = L2CAP_SEQ_LIST_CLEAR;
266 seq_list->tail = L2CAP_SEQ_LIST_CLEAR;
267 }
268}
269
270static void l2cap_seq_list_append(struct l2cap_seq_list *seq_list, u16 seq)
271{
272 u16 mask = seq_list->mask;
273
274 BT_DBG("seq_list %p, seq %d", seq_list, (int) seq);
275
276 if (seq_list->list[seq & mask] == L2CAP_SEQ_LIST_CLEAR) {
277 if (seq_list->tail == L2CAP_SEQ_LIST_CLEAR)
278 seq_list->head = seq;
279 else
280 seq_list->list[seq_list->tail & mask] = seq;
281
282 seq_list->tail = seq;
283 seq_list->list[seq & mask] = L2CAP_SEQ_LIST_TAIL;
284 }
285}
286
287static u16 __pack_enhanced_control(struct bt_l2cap_control *control)
288{
289 u16 packed;
290
291 packed = (control->reqseq << L2CAP_CTRL_REQSEQ_SHIFT) &
292 L2CAP_CTRL_REQSEQ;
293 packed |= (control->final << L2CAP_CTRL_FINAL_SHIFT) &
294 L2CAP_CTRL_FINAL;
295
296 if (control->frame_type == 's') {
297 packed |= (control->poll << L2CAP_CTRL_POLL_SHIFT) &
298 L2CAP_CTRL_POLL;
299 packed |= (control->super << L2CAP_CTRL_SUPERVISE_SHIFT) &
300 L2CAP_CTRL_SUPERVISE;
301 packed |= L2CAP_CTRL_FRAME_TYPE;
302 } else {
303 packed |= (control->sar << L2CAP_CTRL_SAR_SHIFT) &
304 L2CAP_CTRL_SAR;
305 packed |= (control->txseq << L2CAP_CTRL_TXSEQ_SHIFT) &
306 L2CAP_CTRL_TXSEQ;
307 }
308
309 return packed;
310}
311
312static void __get_enhanced_control(u16 enhanced,
313 struct bt_l2cap_control *control)
314{
315 control->reqseq = (enhanced & L2CAP_CTRL_REQSEQ) >>
316 L2CAP_CTRL_REQSEQ_SHIFT;
317 control->final = (enhanced & L2CAP_CTRL_FINAL) >>
318 L2CAP_CTRL_FINAL_SHIFT;
319
320 if (enhanced & L2CAP_CTRL_FRAME_TYPE) {
321 control->frame_type = 's';
322 control->poll = (enhanced & L2CAP_CTRL_POLL) >>
323 L2CAP_CTRL_POLL_SHIFT;
324 control->super = (enhanced & L2CAP_CTRL_SUPERVISE) >>
325 L2CAP_CTRL_SUPERVISE_SHIFT;
326
327 control->sar = 0;
328 control->txseq = 0;
329 } else {
330 control->frame_type = 'i';
331 control->sar = (enhanced & L2CAP_CTRL_SAR) >>
332 L2CAP_CTRL_SAR_SHIFT;
333 control->txseq = (enhanced & L2CAP_CTRL_TXSEQ) >>
334 L2CAP_CTRL_TXSEQ_SHIFT;
335
336 control->poll = 0;
337 control->super = 0;
338 }
339}
340
341static u32 __pack_extended_control(struct bt_l2cap_control *control)
342{
343 u32 packed;
344
345 packed = (control->reqseq << L2CAP_EXT_CTRL_REQSEQ_SHIFT) &
346 L2CAP_EXT_CTRL_REQSEQ;
347 packed |= (control->final << L2CAP_EXT_CTRL_FINAL_SHIFT) &
348 L2CAP_EXT_CTRL_FINAL;
349
350 if (control->frame_type == 's') {
351 packed |= (control->poll << L2CAP_EXT_CTRL_POLL_SHIFT) &
352 L2CAP_EXT_CTRL_POLL;
353 packed |= (control->super << L2CAP_EXT_CTRL_SUPERVISE_SHIFT) &
354 L2CAP_EXT_CTRL_SUPERVISE;
355 packed |= L2CAP_EXT_CTRL_FRAME_TYPE;
356 } else {
357 packed |= (control->sar << L2CAP_EXT_CTRL_SAR_SHIFT) &
358 L2CAP_EXT_CTRL_SAR;
359 packed |= (control->txseq << L2CAP_EXT_CTRL_TXSEQ_SHIFT) &
360 L2CAP_EXT_CTRL_TXSEQ;
361 }
362
363 return packed;
364}
365
366static void __get_extended_control(u32 extended,
367 struct bt_l2cap_control *control)
368{
369 control->reqseq = (extended & L2CAP_EXT_CTRL_REQSEQ) >>
370 L2CAP_EXT_CTRL_REQSEQ_SHIFT;
371 control->final = (extended & L2CAP_EXT_CTRL_FINAL) >>
372 L2CAP_EXT_CTRL_FINAL_SHIFT;
373
374 if (extended & L2CAP_EXT_CTRL_FRAME_TYPE) {
375 control->frame_type = 's';
376 control->poll = (extended & L2CAP_EXT_CTRL_POLL) >>
377 L2CAP_EXT_CTRL_POLL_SHIFT;
378 control->super = (extended & L2CAP_EXT_CTRL_SUPERVISE) >>
379 L2CAP_EXT_CTRL_SUPERVISE_SHIFT;
380
381 control->sar = 0;
382 control->txseq = 0;
383 } else {
384 control->frame_type = 'i';
385 control->sar = (extended & L2CAP_EXT_CTRL_SAR) >>
386 L2CAP_EXT_CTRL_SAR_SHIFT;
387 control->txseq = (extended & L2CAP_EXT_CTRL_TXSEQ) >>
388 L2CAP_EXT_CTRL_TXSEQ_SHIFT;
389
390 control->poll = 0;
391 control->super = 0;
392 }
393}
394
/* Cancel a pending ERTM ack timer, if armed.  Note __cancel_delayed_work()
 * does not wait for an already-running handler to finish.
 */
static inline void l2cap_ertm_stop_ack_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->ack_work);
}
400
401static inline void l2cap_ertm_start_ack_timer(struct l2cap_pinfo *pi)
402{
403 BT_DBG("pi %p, pending %d", pi, delayed_work_pending(&pi->ack_work));
404 if (!delayed_work_pending(&pi->ack_work)) {
405 queue_delayed_work(_l2cap_wq, &pi->ack_work,
406 msecs_to_jiffies(L2CAP_DEFAULT_ACK_TO));
407 }
408}
409
/* Cancel a pending ERTM retransmission timer, if armed.  Note
 * __cancel_delayed_work() does not wait for a running handler.
 */
static inline void l2cap_ertm_stop_retrans_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->retrans_work);
}
415
416static inline void l2cap_ertm_start_retrans_timer(struct l2cap_pinfo *pi)
417{
418 BT_DBG("pi %p", pi);
419 if (!delayed_work_pending(&pi->monitor_work) && pi->retrans_timeout) {
420 __cancel_delayed_work(&pi->retrans_work);
421 queue_delayed_work(_l2cap_wq, &pi->retrans_work,
422 msecs_to_jiffies(pi->retrans_timeout));
423 }
424}
425
/* Cancel a pending ERTM monitor timer, if armed.  Note
 * __cancel_delayed_work() does not wait for a running handler.
 */
static inline void l2cap_ertm_stop_monitor_timer(struct l2cap_pinfo *pi)
{
	BT_DBG("pi %p", pi);
	__cancel_delayed_work(&pi->monitor_work);
}
431
432static inline void l2cap_ertm_start_monitor_timer(struct l2cap_pinfo *pi)
433{
434 BT_DBG("pi %p", pi);
435 l2cap_ertm_stop_retrans_timer(pi);
436 __cancel_delayed_work(&pi->monitor_work);
437 if (pi->monitor_timeout) {
438 queue_delayed_work(_l2cap_wq, &pi->monitor_work,
439 msecs_to_jiffies(pi->monitor_timeout));
440 }
441}
442
443static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
Marcel Holtmann01394182006-07-03 10:02:46 +0200444{
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300445 u16 cid = L2CAP_CID_DYN_START;
Marcel Holtmann01394182006-07-03 10:02:46 +0200446
Gustavo F. Padovan8db4dc42009-04-20 01:31:05 -0300447 for (; cid < L2CAP_CID_DYN_END; cid++) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700448 if (!__l2cap_get_chan_by_scid(l, cid))
Marcel Holtmann01394182006-07-03 10:02:46 +0200449 return cid;
450 }
451
452 return 0;
453}
454
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700455static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300456{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700457 sock_hold(sk);
Gustavo F. Padovanf1b394a2011-06-03 00:19:47 -0300458
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700459 if (l->head)
460 l2cap_pi(l->head)->prev_c = sk;
461
462 l2cap_pi(sk)->next_c = l->head;
463 l2cap_pi(sk)->prev_c = NULL;
464 l->head = sk;
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300465}
466
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700467static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300468{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700469 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300470
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700471 write_lock_bh(&l->lock);
472 if (sk == l->head)
473 l->head = next;
474
475 if (next)
476 l2cap_pi(next)->prev_c = prev;
477 if (prev)
478 l2cap_pi(prev)->next_c = next;
479 write_unlock_bh(&l->lock);
480
481 __sock_put(sk);
Gustavo F. Padovan13003e02011-05-02 18:25:01 -0300482}
483
/* Attach a channel socket to an established L2CAP connection and assign
 * its source/destination CIDs and MTUs according to socket type.
 * Caller must hold the channel-list write lock.
 */
static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
{
	struct l2cap_chan_list *l = &conn->chan_list;

	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);

	/* Default disconnect reason 0x13 — presumably HCI "remote user
	 * terminated connection"; confirm against the HCI error codes.
	 */
	conn->disc_reason = 0x13;

	l2cap_pi(sk)->conn = conn;

	if (!l2cap_pi(sk)->fixed_channel &&
		(sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)) {
		if (conn->hcon->type == LE_LINK) {
			/* LE connection: fixed LE data CID, enforce the LE
			 * minimum MTU in both directions. */
			if (l2cap_pi(sk)->imtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->imtu = L2CAP_LE_DEFAULT_MTU;
			if (l2cap_pi(sk)->omtu < L2CAP_LE_DEFAULT_MTU)
				l2cap_pi(sk)->omtu = L2CAP_LE_DEFAULT_MTU;

			l2cap_pi(sk)->scid = L2CAP_CID_LE_DATA;
			l2cap_pi(sk)->dcid = L2CAP_CID_LE_DATA;
		} else {
			/* Alloc CID for connection-oriented socket */
			l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
			l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
		}
	} else if (sk->sk_type == SOCK_DGRAM) {
		/* Connectionless socket */
		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	} else if (sk->sk_type == SOCK_RAW) {
		/* Raw socket can send/recv signalling messages only */
		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
	}
	/* Otherwise, do not set scid/dcid/omtu. These will be set up
	 * by l2cap_fixed_channel_config()
	 */

	__l2cap_chan_link(l, sk);
}
528
/* Delete channel.
 * Must be called on the locked socket.
 *
 * Tears down a channel: unlinks it from the connection, releases any
 * AMP controller/channel state, marks the socket closed and zapped,
 * notifies the owner (or the listening parent), and purges all queued
 * data and pending ERTM timers.  'err' (if non-zero) is reported to
 * user space via sk_err.
 */
void l2cap_chan_del(struct sock *sk, int err)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sock *parent = bt_sk(sk)->parent;

	l2cap_sock_clear_timer(sk);

	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);

	if (conn) {
		/* Unlink from channel list */
		l2cap_chan_unlink(&conn->chan_list, sk);
		l2cap_pi(sk)->conn = NULL;
		/* Fixed channels do not hold a reference on the ACL link */
		if (!l2cap_pi(sk)->fixed_channel)
			hci_conn_put(conn->hcon);
	}

	if (l2cap_pi(sk)->ampcon) {
		/* Detach from the AMP physical link and logical channel */
		l2cap_pi(sk)->ampcon->l2cap_data = NULL;
		l2cap_pi(sk)->ampcon = NULL;
		if (l2cap_pi(sk)->ampchan) {
			hci_chan_put(l2cap_pi(sk)->ampchan);
			/* Other channels may still share this hci_chan */
			if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
				l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
		}
		l2cap_pi(sk)->ampchan = NULL;
		l2cap_pi(sk)->amp_id = 0;
	}

	sk->sk_state = BT_CLOSED;
	sock_set_flag(sk, SOCK_ZAPPED);

	if (err)
		sk->sk_err = err;

	if (parent) {
		/* Pending accept(): detach and wake the listener */
		bt_accept_unlink(sk);
		parent->sk_data_ready(parent, 0);
	} else
		sk->sk_state_change(sk);

	skb_queue_purge(TX_QUEUE(sk));

	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
		/* Drop any partially reassembled SDU and SREJ'd frames,
		 * then disarm all ERTM timers. */
		if (l2cap_pi(sk)->sdu)
			kfree_skb(l2cap_pi(sk)->sdu);

		skb_queue_purge(SREJ_QUEUE(sk));

		__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
		__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
		__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
	}
}
586
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700587static inline u8 l2cap_get_auth_type(struct sock *sk)
Gustavo F. Padovan6e9e43f2011-04-28 17:55:53 -0300588{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700589 if (sk->sk_type == SOCK_RAW) {
590 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530591 case BT_SECURITY_HIGH:
592 return HCI_AT_DEDICATED_BONDING_MITM;
593 case BT_SECURITY_MEDIUM:
594 return HCI_AT_DEDICATED_BONDING;
595 default:
596 return HCI_AT_NO_BONDING;
597 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700598 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
599 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
600 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
Johan Hedberg8556edd32011-01-19 12:06:50 +0530601
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700602 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
Johan Hedberg8556edd32011-01-19 12:06:50 +0530603 return HCI_AT_NO_BONDING_MITM;
604 else
605 return HCI_AT_NO_BONDING;
606 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700607 switch (l2cap_pi(sk)->sec_level) {
Johan Hedberg8556edd32011-01-19 12:06:50 +0530608 case BT_SECURITY_HIGH:
609 return HCI_AT_GENERAL_BONDING_MITM;
610 case BT_SECURITY_MEDIUM:
611 return HCI_AT_GENERAL_BONDING;
612 default:
613 return HCI_AT_NO_BONDING;
614 }
615 }
616}
617
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200618/* Service level security */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700619static inline int l2cap_check_security(struct sock *sk)
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200620{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700621 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100622 __u8 auth_type;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200623
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700624 auth_type = l2cap_get_auth_type(sk);
Marcel Holtmann0684e5f2009-02-09 02:48:38 +0100625
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700626 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
627 auth_type);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200628}
629
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700630u8 l2cap_get_ident(struct l2cap_conn *conn)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200631{
632 u8 id;
633
634 /* Get next available identificator.
635 * 1 - 128 are used by kernel.
636 * 129 - 199 are reserved.
637 * 200 - 254 are used by utilities like l2ping, etc.
638 */
639
640 spin_lock_bh(&conn->lock);
641
642 if (++conn->tx_ident > 128)
643 conn->tx_ident = 1;
644
645 id = conn->tx_ident;
646
647 spin_unlock_bh(&conn->lock);
648
649 return id;
650}
651
/* Compute the frame check sequence (CRC-16) over an outgoing frame and
 * write it into the last two bytes of the final fragment.  The frame is
 * either one linear skb or a head skb plus a fragment list; the FCS
 * field itself is excluded from the checksum.  The skb must already
 * have L2CAP_FCS_SIZE bytes reserved at the end.
 */
static void apply_fcs(struct sk_buff *skb)
{
	size_t len;
	u16 partial_crc;
	struct sk_buff *iter;
	struct sk_buff *final_frag = skb;

	/* With a frag list, the head's linear data is checksummed fully;
	 * otherwise the FCS bytes at the tail are skipped. */
	if (skb_has_frag_list(skb))
		len = skb_headlen(skb);
	else
		len = skb->len - L2CAP_FCS_SIZE;

	partial_crc = crc16(0, (u8 *) skb->data, len);

	skb_walk_frags(skb, iter) {
		len = iter->len;
		if (!iter->next)
			len -= L2CAP_FCS_SIZE;	/* last frag holds the FCS */

		partial_crc = crc16(partial_crc, iter->data, len);
		final_frag = iter;
	}

	put_unaligned_le16(partial_crc,
		final_frag->data + final_frag->len - L2CAP_FCS_SIZE);
}
678
679void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200680{
681 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200682 u8 flags;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200683
684 BT_DBG("code 0x%2.2x", code);
685
686 if (!skb)
Gustavo F. Padovan9a9c6a32010-05-01 16:15:43 -0300687 return;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200688
Andrei Emeltchenkoe7021122011-01-03 11:14:36 +0200689 if (lmp_no_flush_capable(conn->hcon->hdev))
690 flags = ACL_START_NO_FLUSH;
691 else
692 flags = ACL_START;
693
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700694 bt_cb(skb)->force_active = 1;
Jaikumar Ganesh514abe62011-05-23 18:06:04 -0700695
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700696 hci_send_acl(conn->hcon, NULL, skb, flags);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200697}
698
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700699static inline int __l2cap_no_conn_pending(struct sock *sk)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300700{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700701 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -0300702}
703
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700704static void l2cap_send_conn_req(struct sock *sk)
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300705{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700706 struct l2cap_conn_req req;
707 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
708 req.psm = l2cap_pi(sk)->psm;
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300709
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700710 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
Gustavo F. Padovan2ab25cd2009-10-03 02:34:40 -0300711
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700712 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
713 L2CAP_CONN_REQ, sizeof(req), &req);
Gustavo F. Padovan7e743092009-08-26 04:04:03 -0300714}
715
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700716static void l2cap_send_create_chan_req(struct sock *sk, u8 amp_id)
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300717{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700718 struct l2cap_create_chan_req req;
719 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
720 req.psm = l2cap_pi(sk)->psm;
721 req.amp_id = amp_id;
722
723 l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;
724 l2cap_pi(sk)->ident = l2cap_get_ident(l2cap_pi(sk)->conn);
725
726 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
727 L2CAP_CREATE_CHAN_REQ, sizeof(req), &req);
Andrei Emeltchenkoe501d052010-07-08 12:14:41 +0300728}
729
/* Kick off channel establishment.  If the remote feature mask has
 * already been exchanged, raise link security and then either start
 * AMP physical-link creation or send a plain Connect Request; if not,
 * send an Information Request for the feature mask first and let the
 * info-response handler restart the sequence.
 */
static void l2cap_do_start(struct sock *sk)
{
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
		/* Feature exchange in flight: wait for its completion */
		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
			return;

		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

			/* Channels that prefer AMP first create the AMP
			 * physical link; others connect over BR/EDR. */
			if (l2cap_pi(sk)->amp_pref == BT_AMP_POLICY_PREFER_AMP)
				amp_create_physical(l2cap_pi(sk)->conn, sk);
			else
				l2cap_send_conn_req(sk);
		}
	} else {
		struct l2cap_info_req req;
		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		/* Time out the feature exchange if no response arrives */
		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
	}
}
760
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300761static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
762{
763 u32 local_feat_mask = l2cap_feat_mask;
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -0300764 if (!disable_ertm)
Gustavo F. Padovancf6c2c02010-06-07 20:54:45 -0300765 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
766
767 switch (mode) {
768 case L2CAP_MODE_ERTM:
769 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
770 case L2CAP_MODE_STREAMING:
771 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
772 default:
773 return 0x00;
774 }
775}
776
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700777void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -0300778{
779 struct l2cap_disconn_req req;
780
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300781 if (!conn)
782 return;
783
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700784 skb_queue_purge(TX_QUEUE(sk));
Gustavo F. Padovane92c8e72011-04-01 00:53:45 -0300785
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700786 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
787 skb_queue_purge(SREJ_QUEUE(sk));
788
789 __cancel_delayed_work(&l2cap_pi(sk)->ack_work);
790 __cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
791 __cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300792 }
793
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700794 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
795 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -0300796 l2cap_send_cmd(conn, l2cap_get_ident(conn),
797 L2CAP_DISCONN_REQ, sizeof(req), &req);
Gustavo F. Padovanc13ffa62010-05-13 20:50:12 -0300798
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700799 sk->sk_state = BT_DISCONN;
Gustavo F. Padovan9b108fc2010-05-20 16:21:53 -0300800 sk->sk_err = err;
Gustavo F. Padovan22121fc2009-07-23 10:27:23 -0300801}
802
Linus Torvalds1da177e2005-04-16 15:20:36 -0700803/* ---- L2CAP connections ---- */
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200804static void l2cap_conn_start(struct l2cap_conn *conn)
805{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700806 struct l2cap_chan_list *l = &conn->chan_list;
807 struct sock_del_list del, *tmp1, *tmp2;
808 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200809
810 BT_DBG("conn %p", conn);
811
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700812 INIT_LIST_HEAD(&del.list);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200813
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700814 read_lock(&l->lock);
Gustavo F. Padovanbaa7e1f2011-03-31 16:17:41 -0300815
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700816 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +0200817 bh_lock_sock(sk);
818
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700819 if (sk->sk_type != SOCK_SEQPACKET &&
820 sk->sk_type != SOCK_STREAM) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200821 bh_unlock_sock(sk);
822 continue;
823 }
824
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700825 if (sk->sk_state == BT_CONNECT) {
826 if (!l2cap_check_security(sk) ||
827 !__l2cap_no_conn_pending(sk)) {
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300828 bh_unlock_sock(sk);
829 continue;
Marcel Holtmannb1235d72008-07-14 20:13:54 +0200830 }
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300831
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700832 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
833 conn->feat_mask)
834 && l2cap_pi(sk)->conf_state &
835 L2CAP_CONF_STATE2_DEVICE) {
836 tmp1 = kzalloc(sizeof(struct sock_del_list),
837 GFP_ATOMIC);
838 tmp1->sk = sk;
839 list_add_tail(&tmp1->list, &del.list);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300840 bh_unlock_sock(sk);
841 continue;
842 }
843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700844 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700846 if (l2cap_pi(sk)->amp_pref == BT_AMP_POLICY_PREFER_AMP)
847 amp_create_physical(l2cap_pi(sk)->conn, sk);
848 else
849 l2cap_send_conn_req(sk);
Gustavo F. Padovan47731de2010-07-09 16:38:35 -0300850
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700851 } else if (sk->sk_state == BT_CONNECT2) {
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200852 struct l2cap_conn_rsp rsp;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300853 char buf[128];
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700854 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
855 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200856
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700857 if (l2cap_check_security(sk)) {
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100858 if (bt_sk(sk)->defer_setup) {
859 struct sock *parent = bt_sk(sk)->parent;
860 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
861 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
Nick Pellyf86e4b02010-04-08 16:23:32 -0700862 if (parent)
863 parent->sk_data_ready(parent, 0);
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100864
865 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700866 sk->sk_state = BT_CONFIG;
Marcel Holtmannf66dc812009-01-15 21:57:00 +0100867 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
868 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
869 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200870 } else {
871 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
872 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
873 }
874
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700875 if (rsp.result == cpu_to_le16(L2CAP_CR_SUCCESS) &&
876 l2cap_pi(sk)->amp_id) {
877 amp_accept_physical(conn,
878 l2cap_pi(sk)->amp_id, sk);
879 bh_unlock_sock(sk);
880 continue;
881 }
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300882
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700883 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
884 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
885
886 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300887 rsp.result != L2CAP_CR_SUCCESS) {
888 bh_unlock_sock(sk);
889 continue;
890 }
891
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700892 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
Gustavo F. Padovane9aeb2d2010-07-08 20:08:18 -0300893 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700894 l2cap_build_conf_req(sk, buf), buf);
895 l2cap_pi(sk)->num_conf_req++;
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200896 }
897
898 bh_unlock_sock(sk);
899 }
900
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700901 read_unlock(&l->lock);
902
903 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
904 bh_lock_sock(tmp1->sk);
905 __l2cap_sock_close(tmp1->sk, ECONNRESET);
906 bh_unlock_sock(tmp1->sk);
907 list_del(&tmp1->list);
908 kfree(tmp1);
909 }
Marcel Holtmann79d554a2008-07-14 20:13:44 +0200910}
911
Ville Tervob62f3282011-02-10 22:38:50 -0300912/* Find socket with cid and source bdaddr.
913 * Returns closest match, locked.
914 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700915static struct sock *l2cap_get_sock_by_scid(int state, __le16 cid, bdaddr_t *src)
Ville Tervob62f3282011-02-10 22:38:50 -0300916{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700917 struct sock *sk = NULL, *sk1 = NULL;
918 struct hlist_node *node;
Ville Tervob62f3282011-02-10 22:38:50 -0300919
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700920 read_lock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300921
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700922 sk_for_each(sk, node, &l2cap_sk_list.head) {
923 if (state && sk->sk_state != state)
Ville Tervob62f3282011-02-10 22:38:50 -0300924 continue;
925
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700926 if (l2cap_pi(sk)->scid == cid) {
Ville Tervob62f3282011-02-10 22:38:50 -0300927 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700928 if (!bacmp(&bt_sk(sk)->src, src))
929 break;
Ville Tervob62f3282011-02-10 22:38:50 -0300930
931 /* Closest match */
932 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700933 sk1 = sk;
Ville Tervob62f3282011-02-10 22:38:50 -0300934 }
935 }
Gustavo F. Padovan280f2942011-04-13 19:01:22 -0300936
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700937 read_unlock(&l2cap_sk_list.lock);
Ville Tervob62f3282011-02-10 22:38:50 -0300938
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700939 return node ? sk : sk1;
Ville Tervob62f3282011-02-10 22:38:50 -0300940}
941
/* An LE link came up with us as slave: if a socket is listening on the
 * LE data CID, create and enqueue a child socket for the connection.
 * Runs in softirq context (GFP_ATOMIC, bh locks).
 */
static void l2cap_le_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct sock *parent, *uninitialized_var(sk);

	BT_DBG("");

	/* Check if we have socket listening on cid */
	parent = l2cap_get_sock_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
					conn->src);
	if (!parent)
		return;

	bh_lock_sock(parent);

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto clean;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto clean;

	write_lock_bh(&list->lock);

	/* Extra hcon reference is held by the new channel */
	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	/* LE data channels need no connect/config exchange */
	sk->sk_state = BT_CONNECTED;
	parent->sk_data_ready(parent, 0);

	write_unlock_bh(&list->lock);

	/* Success path falls through: the parent lock is released below */
clean:
	bh_unlock_sock(parent);
}
989
/* The underlying HCI link is up: bring every channel on @conn forward.
 * LE channels are gated on SMP security; connectionless sockets become
 * connected immediately; connection-oriented ones in BT_CONNECT start
 * the L2CAP connect sequence via l2cap_do_start().
 */
static void l2cap_conn_ready(struct l2cap_conn *conn)
{
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sock *sk;

	BT_DBG("conn %p", conn);

	/* Incoming LE link: possibly spawn a child socket first */
	if (!conn->hcon->out && conn->hcon->type == LE_LINK)
		l2cap_le_conn_ready(conn);

	read_lock(&l->lock);

	if (l->head) {
		for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
			bh_lock_sock(sk);

			if (conn->hcon->type == LE_LINK) {
				/* Use the stronger of the channel's own and
				 * the link's pending security requirement.
				 */
				u8 sec_level = l2cap_pi(sk)->sec_level;
				u8 pending_sec = conn->hcon->pending_sec_level;

				if (pending_sec > sec_level)
					sec_level = pending_sec;

				/* NOTE(review): hci_conn_put here presumably
				 * drops a reference taken when security was
				 * requested — confirm against smp code.
				 */
				if (smp_conn_security(conn, sec_level)) {
					l2cap_chan_ready(sk);
					hci_conn_put(conn->hcon);
				}

			} else if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Connectionless: ready as soon as the link is */
				l2cap_sock_clear_timer(sk);
				sk->sk_state = BT_CONNECTED;
				sk->sk_state_change(sk);
			} else if (sk->sk_state == BT_CONNECT)
				l2cap_do_start(sk);

			bh_unlock_sock(sk);
		}
	} else if (conn->hcon->type == LE_LINK) {
		/* No channels yet on this LE link: raise link security */
		smp_conn_security(conn, BT_SECURITY_HIGH);
	}

	read_unlock(&l->lock);
}
1034
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001035/* Notify sockets that we cannot guaranty reliability anymore */
1036static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
1037{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001038 struct l2cap_chan_list *l = &conn->chan_list;
1039 struct sock *sk;
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001040
1041 BT_DBG("conn %p", conn);
1042
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001043 read_lock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001044
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001045 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
1046 if (l2cap_pi(sk)->force_reliable)
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001047 sk->sk_err = err;
1048 }
1049
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001050 read_unlock(&l->lock);
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001051}
1052
1053static void l2cap_info_timeout(unsigned long arg)
1054{
1055 struct l2cap_conn *conn = (void *) arg;
1056
Marcel Holtmann984947d2009-02-06 23:35:19 +01001057 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01001058 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01001059
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02001060 l2cap_conn_start(conn);
1061}
1062
/* Attach (or return the existing) L2CAP connection object to an HCI
 * link.  Returns NULL on allocation failure, the existing conn if one
 * is already attached, or NULL-equivalent existing conn when @status
 * is non-zero (link setup failed).
 */
static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* Already attached, or the link failed: nothing to create */
	if (conn || status)
		return conn;

	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;

	hcon->l2cap_data = conn;
	conn->hcon = hcon;

	BT_DBG("hcon %p conn %p", hcon, conn);

	/* LE links may advertise their own, smaller MTU */
	if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
		conn->mtu = hcon->hdev->le_mtu;
	else
		conn->mtu = hcon->hdev->acl_mtu;

	conn->src = &hcon->hdev->bdaddr;
	conn->dst = &hcon->dst;

	conn->feat_mask = 0;

	spin_lock_init(&conn->lock);
	rwlock_init(&conn->chan_list.lock);

	/* NOTE(review): the LE branch arms hcon->smp_timer but passes
	 * conn as the callback argument — confirm smp_timeout expects
	 * the l2cap_conn pointer, not the hci_conn.
	 */
	if (hcon->type == LE_LINK)
		setup_timer(&hcon->smp_timer, smp_timeout,
				(unsigned long) conn);
	else
		setup_timer(&conn->info_timer, l2cap_info_timeout,
				(unsigned long) conn);

	/* Default disconnect reason: 0x13 (remote user terminated) */
	conn->disc_reason = 0x13;

	return conn;
}
1103
/* Tear down channels associated with @hcon and, when @hcon is the
 * primary (BR/EDR) link, free the whole l2cap_conn.  @hcon may also be
 * an AMP link, in which case only channels moved to it are killed and
 * the conn itself survives.
 */
static void l2cap_conn_del(struct hci_conn *hcon, int err)
{
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;
	struct sock *next;

	if (!conn)
		return;

	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);

	/* Drop any partially reassembled frame */
	if ((conn->hcon == hcon) && (conn->rx_skb))
		kfree_skb(conn->rx_skb);

	BT_DBG("conn->hcon %p", conn->hcon);

	/* Kill channels */
	for (sk = conn->chan_list.head; sk; ) {
		BT_DBG("ampcon %p", l2cap_pi(sk)->ampcon);
		if ((conn->hcon == hcon) || (l2cap_pi(sk)->ampcon == hcon)) {
			/* Fetch the successor before l2cap_chan_del unlinks
			 * this socket from the list.
			 */
			next = l2cap_pi(sk)->next_c;
			bh_lock_sock(sk);
			l2cap_chan_del(sk, err);
			bh_unlock_sock(sk);
			l2cap_sock_kill(sk);
			sk = next;
		} else
			sk = l2cap_pi(sk)->next_c;
	}

	/* Only the primary link death destroys the conn object */
	if (conn->hcon == hcon) {
		if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
			del_timer_sync(&conn->info_timer);

		hcon->l2cap_data = NULL;

		kfree(conn);
	}
}
1143
1144static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk)
1145{
1146 struct l2cap_chan_list *l = &conn->chan_list;
1147 write_lock_bh(&l->lock);
1148 __l2cap_chan_add(conn, sk);
1149 write_unlock_bh(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001150}
1151
Linus Torvalds1da177e2005-04-16 15:20:36 -07001152/* ---- Socket interface ---- */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001153
1154/* Find socket with psm and source bdaddr.
1155 * Returns closest match.
1156 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001157static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001158{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001159 struct sock *sk = NULL, *sk1 = NULL;
1160 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001161
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001162 read_lock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001163
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001164 sk_for_each(sk, node, &l2cap_sk_list.head) {
1165 if (state && sk->sk_state != state)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001166 continue;
1167
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001168 if (l2cap_pi(sk)->psm == psm) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001169 /* Exact match. */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001170 if (!bacmp(&bt_sk(sk)->src, src))
1171 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001172
1173 /* Closest match */
1174 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001175 sk1 = sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001176 }
1177 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001178
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001179 read_unlock(&l2cap_sk_list.lock);
Gustavo F. Padovane0f0cb52010-11-01 18:43:53 +00001180
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001181 return node ? sk : sk1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001182}
1183
/* Establish an outgoing L2CAP channel to bt_sk(sk)->dst.
 *
 * Resolves the local adapter for the route, creates (or reuses) the
 * HCI link — LE or ACL depending on the destination CID — attaches the
 * channel to the connection, and either completes immediately (fixed
 * channels, already-connected LE links) or begins the L2CAP connect
 * sequence.  Returns 0 on success or a negative errno.
 */
int l2cap_do_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct l2cap_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	__u8 auth_type;
	int err;

	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
							l2cap_pi(sk)->psm);

	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;

	hci_dev_lock_bh(hdev);

	auth_type = l2cap_get_auth_type(sk);

	if (l2cap_pi(sk)->fixed_channel) {
		/* Fixed channels piggyback on existing ACL connections */
		hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, dst);
		if (!hcon || !hcon->l2cap_data) {
			err = -ENOTCONN;
			goto done;
		}

		conn = hcon->l2cap_data;
	} else {
		/* LE data CID selects an LE link, anything else ACL */
		if (l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA)
			hcon = hci_connect(hdev, LE_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);
		else
			hcon = hci_connect(hdev, ACL_LINK, 0, dst,
					l2cap_pi(sk)->sec_level, auth_type);

		if (IS_ERR(hcon)) {
			err = PTR_ERR(hcon);
			goto done;
		}

		conn = l2cap_conn_add(hcon, 0);
		if (!conn) {
			/* Drop the reference hci_connect gave us */
			hci_conn_put(hcon);
			err = -ENOMEM;
			goto done;
		}
	}

	/* Update source addr of the socket */
	bacpy(src, conn->src);

	l2cap_chan_add(conn, sk);

	/* Fixed channels and already-up LE links need no L2CAP
	 * connect/config exchange.
	 */
	if ((l2cap_pi(sk)->fixed_channel) ||
		(l2cap_pi(sk)->dcid == L2CAP_CID_LE_DATA &&
			hcon->state == BT_CONNECTED)) {
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
	} else {
		sk->sk_state = BT_CONNECT;
		l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
		sk->sk_state_change(sk);

		if (hcon->state == BT_CONNECTED) {
			if (sk->sk_type != SOCK_SEQPACKET &&
					sk->sk_type != SOCK_STREAM) {
				/* Connectionless: done once security holds */
				l2cap_sock_clear_timer(sk);
				if (l2cap_check_security(sk)) {
					sk->sk_state = BT_CONNECTED;
					sk->sk_state_change(sk);
				}
			} else
				l2cap_do_start(sk);
		}
	}

	err = 0;

done:
	hci_dev_unlock_bh(hdev);
	hci_dev_put(hdev);
	return err;
}
1270
/* Block (interruptibly) until all ERTM frames on @sk have been acked
 * and drained from the local queue, the connection goes away, or a
 * signal/socket error interrupts the wait.  Called with the socket
 * locked; the lock is released around each sleep.  Returns 0 or a
 * negative errno.
 */
int __l2cap_wait_ack(struct sock *sk)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;
	int timeo = HZ/5;

	add_wait_queue(sk_sleep(sk), &wait);
	while (l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn &&
		atomic_read(&l2cap_pi(sk)->ertm_queued)) {
		set_current_state(TASK_INTERRUPTIBLE);

		/* schedule_timeout may have returned 0; re-arm the slice */
		if (!timeo)
			timeo = HZ/5;

		if (signal_pending(current)) {
			err = sock_intr_errno(timeo);
			break;
		}

		/* Drop the socket lock while sleeping so the RX path can
		 * make progress and ack our frames.
		 */
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);

		err = sock_error(sk);
		if (err)
			break;
	}
	set_current_state(TASK_RUNNING);
	remove_wait_queue(sk_sleep(sk), &wait);
	return err;
}
1302
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001303static void l2cap_ertm_tx_worker(struct work_struct *work)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001304{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001305 struct l2cap_pinfo *pi =
1306 container_of(work, struct l2cap_pinfo, tx_work);
1307 struct sock *sk = (struct sock *)pi;
1308 BT_DBG("%p", pi);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001309
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001310 lock_sock(sk);
1311 l2cap_ertm_send(sk);
1312 release_sock(sk);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001313}
1314
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001315static void l2cap_skb_destructor(struct sk_buff *skb)
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001316{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001317 struct sock *sk = skb->sk;
1318 int queued;
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001319
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001320 queued = atomic_sub_return(1, &l2cap_pi(sk)->ertm_queued);
1321 if (queued < L2CAP_MIN_ERTM_QUEUED)
1322 queue_work(_l2cap_wq, &l2cap_pi(sk)->tx_work);
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001323}
1324
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001325void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001326{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001327 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001328
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001329 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001330
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001331 if (pi->ampcon && (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
1332 pi->amp_move_state == L2CAP_AMP_STATE_WAIT_PREPARE)) {
1333 BT_DBG("Sending on AMP connection %p %p",
1334 pi->ampcon, pi->ampchan);
1335 if (pi->ampchan)
1336 hci_send_acl(pi->ampcon, pi->ampchan, skb,
1337 ACL_COMPLETE);
1338 else
1339 kfree_skb(skb);
1340 } else {
1341 u16 flags;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001342
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001343 bt_cb(skb)->force_active = pi->force_active;
1344 BT_DBG("Sending on BR/EDR connection %p", pi->conn->hcon);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001345
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001346 if (lmp_no_flush_capable(pi->conn->hcon->hdev) &&
1347 !l2cap_pi(sk)->flushable)
1348 flags = ACL_START_NO_FLUSH;
1349 else
1350 flags = ACL_START;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001351
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001352 hci_send_acl(pi->conn->hcon, NULL, skb, flags);
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001353 }
Gustavo F. Padovan6840ed02009-08-20 22:26:01 -03001354}
1355
/* Transmit as many queued ERTM I-frames as the state machine allows.
 *
 * Stops when the send queue is empty, the remote TX window is full,
 * the local HCI backlog reaches L2CAP_MAX_ERTM_QUEUED, or the TX state
 * leaves XMIT.  Returns the number of frames sent, 0 when blocked
 * (remote busy / AMP move in progress), or -ENOTCONN.
 */
int l2cap_ertm_send(struct sock *sk)
{
	struct sk_buff *skb, *tx_skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p", sk);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* Peer asked us to hold off (RNR) */
	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return 0;

	/* Don't transmit while a channel move is settling */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	while (sk->sk_send_head && (pi->unacked_frames < pi->remote_tx_win) &&
		atomic_read(&pi->ertm_queued) < L2CAP_MAX_ERTM_QUEUED &&
		(pi->tx_state == L2CAP_ERTM_TX_STATE_XMIT)) {

		skb = sk->sk_send_head;

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		/* Piggyback a pending F-bit on the next outgoing frame */
		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
			control->final = 1;
			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
		}
		control->reqseq = pi->buffer_seq;
		pi->last_acked_seq = pi->buffer_seq;
		control->txseq = pi->next_tx_seq;

		/* Write the control field in extended or enhanced format */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		/* Clone after data has been modified. Data is assumed to be
		   read-only (for locking purposes) on cloned sk_buffs.
		 */
		tx_skb = skb_clone(skb, GFP_ATOMIC);

		/* Destructor does the queued-frame accounting */
		tx_skb->sk = sk;
		tx_skb->destructor = l2cap_skb_destructor;
		atomic_inc(&pi->ertm_queued);

		l2cap_do_send(sk, tx_skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		l2cap_ertm_start_retrans_timer(pi);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->unacked_frames += 1;
		pi->frames_sent += 1;
		sent += 1;

		/* The original stays queued for retransmission; just
		 * advance the send head.
		 */
		if (skb_queue_is_last(TX_QUEUE(sk), skb))
			sk->sk_send_head = NULL;
		else
			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
	}

	BT_DBG("Sent %d, %d unacked, %d in ERTM queue, %d in HCI queue", sent,
		(int) pi->unacked_frames, skb_queue_len(TX_QUEUE(sk)),
		atomic_read(&pi->ertm_queued));

	return sent;
}
1435
/* Transmit a batch of frames in streaming mode: append @skbs to the TX
 * queue, then drain it completely, stamping each frame with the next
 * TX sequence number.  Unlike ERTM, frames are not retained for
 * retransmission.  Returns 0, or -ENOTCONN if the socket is not
 * connected.
 */
int l2cap_strm_tx(struct sock *sk, struct sk_buff_head *skbs)
{
	struct sk_buff *skb;
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control *control;
	int sent = 0;

	BT_DBG("sk %p, skbs %p", sk, skbs);

	if (sk->sk_state != BT_CONNECTED)
		return -ENOTCONN;

	/* Don't transmit while a channel move is settling */
	if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
			pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
		return 0;

	skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));

	BT_DBG("skb queue empty 0x%2.2x", skb_queue_empty(TX_QUEUE(sk)));
	while (!skb_queue_empty(TX_QUEUE(sk))) {

		skb = skb_dequeue(TX_QUEUE(sk));

		BT_DBG("skb %p", skb);

		bt_cb(skb)->retries = 1;
		control = &bt_cb(skb)->control;

		BT_DBG("control %p", control);

		/* Streaming mode carries no acknowledgements */
		control->reqseq = 0;
		control->txseq = pi->next_tx_seq;

		/* Write the control field in extended or enhanced format */
		if (pi->extended_control) {
			put_unaligned_le32(__pack_extended_control(control),
					skb->data + L2CAP_HDR_SIZE);
		} else {
			put_unaligned_le16(__pack_enhanced_control(control),
					skb->data + L2CAP_HDR_SIZE);
		}

		if (pi->fcs == L2CAP_FCS_CRC16)
			apply_fcs(skb);

		l2cap_do_send(sk, skb);

		BT_DBG("Sent txseq %d", (int)control->txseq);

		pi->next_tx_seq = __next_seq(pi->next_tx_seq, pi);
		pi->frames_sent += 1;
		sent += 1;
	}

	BT_DBG("Sent %d", sent);

	return 0;
}
1493
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001494static int memcpy_fromkvec(unsigned char *kdata, struct kvec *iv, int len)
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001495{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001496 while (len > 0) {
1497 if (iv->iov_len) {
1498 int copy = min_t(unsigned int, len, iv->iov_len);
1499 memcpy(kdata, iv->iov_base, copy);
1500 len -= copy;
1501 kdata += copy;
1502 iv->iov_base += copy;
1503 iv->iov_len -= copy;
1504 }
1505 iv++;
Gustavo F. Padovan9e917af2010-05-01 16:15:37 -03001506 }
Gustavo F. Padovandfc909b2010-05-01 16:15:45 -03001507
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001508 return 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001509}
1510
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001511static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg,
1512 int len, int count, struct sk_buff *skb,
1513 int reseg)
Gustavo F. Padovan99b0d4b2010-05-01 16:15:38 -03001514{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001515 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001516 struct sk_buff **frag;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001517 struct sk_buff *final;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001518 int err, sent = 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001519
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001520 BT_DBG("sk %p, msg %p, len %d, count %d, skb %p", sk,
1521 msg, (int)len, (int)count, skb);
1522
1523 if (!conn)
1524 return -ENOTCONN;
1525
1526 /* When resegmenting, data is copied from kernel space */
1527 if (reseg) {
1528 err = memcpy_fromkvec(skb_put(skb, count),
1529 (struct kvec *) msg->msg_iov, count);
1530 } else {
1531 err = memcpy_fromiovec(skb_put(skb, count), msg->msg_iov,
1532 count);
1533 }
1534
1535 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001536 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537
1538 sent += count;
1539 len -= count;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001540 final = skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001541
1542 /* Continuation fragments (no L2CAP header) */
1543 frag = &skb_shinfo(skb)->frag_list;
1544 while (len) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001545 int skblen;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 count = min_t(unsigned int, conn->mtu, len);
1547
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001548 /* Add room for the FCS if it fits */
1549 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16 &&
1550 len + L2CAP_FCS_SIZE <= conn->mtu)
1551 skblen = count + L2CAP_FCS_SIZE;
1552 else
1553 skblen = count;
1554
1555 /* Don't use bt_skb_send_alloc() while resegmenting, since
1556 * it is not ok to block.
1557 */
1558 if (reseg) {
1559 *frag = bt_skb_alloc(skblen, GFP_ATOMIC);
1560 if (*frag)
1561 skb_set_owner_w(*frag, sk);
1562 } else {
1563 *frag = bt_skb_send_alloc(sk, skblen,
1564 msg->msg_flags & MSG_DONTWAIT, &err);
1565 }
1566
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 if (!*frag)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001568 return -EFAULT;
1569
1570 /* When resegmenting, data is copied from kernel space */
1571 if (reseg) {
1572 err = memcpy_fromkvec(skb_put(*frag, count),
1573 (struct kvec *) msg->msg_iov,
1574 count);
1575 } else {
1576 err = memcpy_fromiovec(skb_put(*frag, count),
1577 msg->msg_iov, count);
1578 }
1579
1580 if (err)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001581 return -EFAULT;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001582
1583 sent += count;
1584 len -= count;
1585
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001586 final = *frag;
1587
Linus Torvalds1da177e2005-04-16 15:20:36 -07001588 frag = &(*frag)->next;
1589 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001591 if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16) {
1592 if (skb_tailroom(final) < L2CAP_FCS_SIZE) {
1593 if (reseg) {
1594 *frag = bt_skb_alloc(L2CAP_FCS_SIZE,
1595 GFP_ATOMIC);
1596 if (*frag)
1597 skb_set_owner_w(*frag, sk);
1598 } else {
1599 *frag = bt_skb_send_alloc(sk, L2CAP_FCS_SIZE,
1600 msg->msg_flags & MSG_DONTWAIT,
1601 &err);
1602 }
1603
1604 if (!*frag)
1605 return -EFAULT;
1606
1607 final = *frag;
1608 }
1609
1610 skb_put(final, L2CAP_FCS_SIZE);
1611 }
1612
Linus Torvalds1da177e2005-04-16 15:20:36 -07001613 return sent;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001614}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001615
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001616struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001617{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001618 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001619 struct sk_buff *skb;
1620 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1621 struct l2cap_hdr *lh;
1622
1623 BT_DBG("sk %p len %d", sk, (int)len);
1624
1625 count = min_t(unsigned int, (conn->mtu - hlen), len);
1626 skb = bt_skb_send_alloc(sk, count + hlen,
1627 msg->msg_flags & MSG_DONTWAIT, &err);
1628 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001629 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001630
1631 /* Create L2CAP header */
1632 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001633 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001634 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001635 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001636
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001637 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001638 if (unlikely(err < 0)) {
1639 kfree_skb(skb);
1640 return ERR_PTR(err);
1641 }
1642 return skb;
1643}
1644
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001645struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001646{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001647 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001648 struct sk_buff *skb;
1649 int err, count, hlen = L2CAP_HDR_SIZE;
1650 struct l2cap_hdr *lh;
1651
1652 BT_DBG("sk %p len %d", sk, (int)len);
1653
1654 count = min_t(unsigned int, (conn->mtu - hlen), len);
1655 skb = bt_skb_send_alloc(sk, count + hlen,
1656 msg->msg_flags & MSG_DONTWAIT, &err);
1657 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001658 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001659
1660 /* Create L2CAP header */
1661 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001662 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001663 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1664
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001665 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, 0);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001666 if (unlikely(err < 0)) {
1667 kfree_skb(skb);
1668 return ERR_PTR(err);
1669 }
1670 return skb;
1671}
1672
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001673struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk,
1674 struct msghdr *msg, size_t len,
1675 u16 sdulen, int reseg)
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001676{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001677 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001678 int err, count, hlen;
1679 int reserve = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001680 struct l2cap_hdr *lh;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001681 u8 fcs = l2cap_pi(sk)->fcs;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001682
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001683 if (l2cap_pi(sk)->extended_control)
1684 hlen = L2CAP_EXTENDED_HDR_SIZE;
1685 else
1686 hlen = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovan0ee0d202010-05-01 16:15:41 -03001687
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001688 if (sdulen)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001689 hlen += L2CAP_SDULEN_SIZE;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001690
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001691 if (fcs == L2CAP_FCS_CRC16)
1692 hlen += L2CAP_FCS_SIZE;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03001693
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001694 BT_DBG("sk %p, msg %p, len %d, sdulen %d, hlen %d",
1695 sk, msg, (int)len, (int)sdulen, hlen);
1696
1697 count = min_t(unsigned int, (l2cap_pi(sk)->conn->mtu - hlen), len);
1698
1699 /* Allocate extra headroom for Qualcomm PAL. This is only
1700 * necessary in two places (here and when creating sframes)
1701 * because only unfragmented iframes and sframes are sent
1702 * using AMP controllers.
1703 */
1704 if (l2cap_pi(sk)->ampcon &&
1705 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1706 reserve = BT_SKB_RESERVE_80211;
1707
1708 /* Don't use bt_skb_send_alloc() while resegmenting, since
1709 * it is not ok to block.
1710 */
1711 if (reseg) {
1712 skb = bt_skb_alloc(count + hlen + reserve, GFP_ATOMIC);
1713 if (skb)
1714 skb_set_owner_w(skb, sk);
1715 } else {
1716 skb = bt_skb_send_alloc(sk, count + hlen + reserve,
1717 msg->msg_flags & MSG_DONTWAIT, &err);
1718 }
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001719 if (!skb)
Gustavo F. Padovan0175d622010-09-24 20:30:57 -03001720 return ERR_PTR(err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001721
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001722 if (reserve)
1723 skb_reserve(skb, reserve);
1724
1725 bt_cb(skb)->control.fcs = fcs;
1726
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001727 /* Create L2CAP header */
1728 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001729 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1730 lh->len = cpu_to_le16(len + hlen - L2CAP_HDR_SIZE);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001731
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001732 /* Control header is populated later */
1733 if (l2cap_pi(sk)->extended_control)
1734 put_unaligned_le32(0, skb_put(skb, 4));
1735 else
1736 put_unaligned_le16(0, skb_put(skb, 2));
1737
1738 if (sdulen)
1739 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1740
1741 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb, reseg);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001742 if (unlikely(err < 0)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001743 BT_DBG("err %d", err);
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001744 kfree_skb(skb);
1745 return ERR_PTR(err);
1746 }
Gustavo F. Padovane90bac02009-08-20 22:26:00 -03001747
1748 bt_cb(skb)->retries = 0;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03001749 return skb;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750}
1751
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001752static void l2cap_ertm_process_reqseq(struct sock *sk, u16 reqseq)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001753{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001754 struct l2cap_pinfo *pi;
1755 struct sk_buff *acked_skb;
1756 u16 ackseq;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001757
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001758 BT_DBG("sk %p, reqseq %d", sk, (int) reqseq);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001759
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001760 pi = l2cap_pi(sk);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001761
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001762 if (pi->unacked_frames == 0 || reqseq == pi->expected_ack_seq)
1763 return;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001764
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001765 BT_DBG("expected_ack_seq %d, unacked_frames %d",
1766 (int) pi->expected_ack_seq, (int) pi->unacked_frames);
1767
1768 for (ackseq = pi->expected_ack_seq; ackseq != reqseq;
1769 ackseq = __next_seq(ackseq, pi)) {
1770
1771 acked_skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), ackseq);
1772 if (acked_skb) {
1773 skb_unlink(acked_skb, TX_QUEUE(sk));
1774 kfree_skb(acked_skb);
1775 pi->unacked_frames--;
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001776 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001777 }
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001778
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001779 pi->expected_ack_seq = reqseq;
1780
1781 if (pi->unacked_frames == 0)
1782 l2cap_ertm_stop_retrans_timer(pi);
1783
1784 BT_DBG("unacked_frames %d", (int) pi->unacked_frames);
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03001785}
1786
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001787static struct sk_buff *l2cap_create_sframe_pdu(struct sock *sk, u32 control)
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001788{
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001789 struct sk_buff *skb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001790 int len;
1791 int reserve = 0;
1792 struct l2cap_hdr *lh;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001793
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001794 if (l2cap_pi(sk)->extended_control)
1795 len = L2CAP_EXTENDED_HDR_SIZE;
1796 else
1797 len = L2CAP_ENHANCED_HDR_SIZE;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001798
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001799 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1800 len += L2CAP_FCS_SIZE;
1801
1802 /* Allocate extra headroom for Qualcomm PAL */
1803 if (l2cap_pi(sk)->ampcon &&
1804 l2cap_pi(sk)->ampcon->hdev->manufacturer == 0x001d)
1805 reserve = BT_SKB_RESERVE_80211;
1806
1807 skb = bt_skb_alloc(len + reserve, GFP_ATOMIC);
1808
1809 if (!skb)
1810 return ERR_PTR(-ENOMEM);
1811
1812 if (reserve)
1813 skb_reserve(skb, reserve);
1814
1815 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1816 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1817 lh->len = cpu_to_le16(len - L2CAP_HDR_SIZE);
1818
1819 if (l2cap_pi(sk)->extended_control)
1820 put_unaligned_le32(control, skb_put(skb, 4));
1821 else
1822 put_unaligned_le16(control, skb_put(skb, 2));
1823
1824 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16) {
1825 u16 fcs = crc16(0, (u8 *) skb->data, skb->len);
1826 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001827 }
1828
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001829 return skb;
1830}
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001831
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001832static void l2cap_ertm_send_sframe(struct sock *sk,
1833 struct bt_l2cap_control *control)
1834{
1835 struct l2cap_pinfo *pi;
1836 struct sk_buff *skb;
1837 u32 control_field;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001838
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001839 BT_DBG("sk %p, control %p", sk, control);
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001840
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001841 if (control->frame_type != 's')
1842 return;
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001843
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001844 pi = l2cap_pi(sk);
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001845
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001846 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
1847 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE &&
1848 pi->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
1849 BT_DBG("AMP error - attempted S-Frame send during AMP move");
1850 return;
1851 }
Gustavo F. Padovandcb1cc32011-04-28 18:50:17 -03001852
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001853 if ((pi->conn_state & L2CAP_CONN_SEND_FBIT) && !control->poll) {
1854 control->final = 1;
1855 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1856 }
1857
1858 if (control->super == L2CAP_SFRAME_RR)
1859 pi->conn_state &= ~L2CAP_CONN_SENT_RNR;
1860 else if (control->super == L2CAP_SFRAME_RNR)
1861 pi->conn_state |= L2CAP_CONN_SENT_RNR;
1862
1863 if (control->super != L2CAP_SFRAME_SREJ) {
1864 pi->last_acked_seq = control->reqseq;
1865 l2cap_ertm_stop_ack_timer(pi);
1866 }
1867
1868 BT_DBG("reqseq %d, final %d, poll %d, super %d", (int) control->reqseq,
1869 (int) control->final, (int) control->poll,
1870 (int) control->super);
1871
1872 if (pi->extended_control)
1873 control_field = __pack_extended_control(control);
1874 else
1875 control_field = __pack_enhanced_control(control);
1876
1877 skb = l2cap_create_sframe_pdu(sk, control_field);
1878 if (!IS_ERR(skb))
1879 l2cap_do_send(sk, skb);
1880}
1881
/* Acknowledge received I-frames.  If the channel is locally busy, send
 * RNR immediately.  Otherwise try to piggyback the ack on pending
 * I-frames; failing that, send an explicit RR once the number of
 * unacked received frames reaches 3/4 of the tx window, or arm the ack
 * timer to ack later.
 */
static void l2cap_ertm_send_ack(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct bt_l2cap_control control;
	/* Frames received but not yet acknowledged */
	u16 frames_to_ack = __delta_seq(pi->buffer_seq, pi->last_acked_seq, pi);
	int threshold;

	BT_DBG("sk %p", sk);
	BT_DBG("last_acked_seq %d, buffer_seq %d", (int)pi->last_acked_seq,
		(int)pi->buffer_seq);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';

	if ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
		pi->rx_state == L2CAP_ERTM_RX_STATE_RECV) {
		l2cap_ertm_stop_ack_timer(pi);
		control.super = L2CAP_SFRAME_RNR;
		control.reqseq = pi->buffer_seq;
		l2cap_ertm_send_sframe(sk, &control);
	} else {
		if (!(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
			l2cap_ertm_send(sk);
			/* If any i-frames were sent, they included an ack */
			if (pi->buffer_seq == pi->last_acked_seq)
				frames_to_ack = 0;
		}

		/* Ack now if the tx window is 3/4ths full.
		 * Calculate without mul or div
		 */
		threshold = pi->tx_win;
		threshold += threshold << 1;
		threshold >>= 2;

		BT_DBG("frames_to_ack %d, threshold %d", (int)frames_to_ack,
			threshold);

		if (frames_to_ack >= threshold) {
			l2cap_ertm_stop_ack_timer(pi);
			control.super = L2CAP_SFRAME_RR;
			control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &control);
			frames_to_ack = 0;
		}

		/* Below threshold: defer the ack via the ack timer */
		if (frames_to_ack)
			l2cap_ertm_start_ack_timer(pi);
	}
}
1932
1933static void l2cap_ertm_send_rr_or_rnr(struct sock *sk, bool poll)
1934{
1935 struct l2cap_pinfo *pi;
1936 struct bt_l2cap_control control;
1937
1938 BT_DBG("sk %p, poll %d", sk, (int) poll);
1939
1940 pi = l2cap_pi(sk);
1941
1942 memset(&control, 0, sizeof(control));
1943 control.frame_type = 's';
1944 control.poll = poll;
1945
1946 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
1947 control.super = L2CAP_SFRAME_RNR;
1948 else
1949 control.super = L2CAP_SFRAME_RR;
1950
1951 control.reqseq = pi->buffer_seq;
1952 l2cap_ertm_send_sframe(sk, &control);
1953}
1954
/* Respond to a poll from the peer: the F-bit must be returned in some
 * frame.  Prefer sending it in an RNR (if locally busy), then try to
 * piggyback it on pending I-frames, and finally fall back to an RR if
 * nothing else carried it.
 */
static void l2cap_ertm_send_i_or_rr_or_rnr(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.final = 1;
	control.reqseq = pi->buffer_seq;
	/* SEND_FBIT is cleared by whichever frame carries the F-bit */
	pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control.super = L2CAP_SFRAME_RNR;
		l2cap_ertm_send_sframe(sk, &control);
	}

	if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
		(pi->unacked_frames > 0))
		l2cap_ertm_start_retrans_timer(pi);

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	/* Send pending iframes */
	l2cap_ertm_send(sk);

	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
		/* F-bit wasn't sent in an s-frame or i-frame yet, so
		 * send it now.
		 */
		control.super = L2CAP_SFRAME_RR;
		l2cap_ertm_send_sframe(sk, &control);
	}
}
1992
/* A frame with sequence number @txseq arrived out of order.  Send an
 * SREJ for every missing frame between the expected sequence number and
 * txseq (skipping any already buffered in the SREJ queue), recording
 * each requested sequence in the srej_list.
 */
static void l2cap_ertm_send_srej(struct sock *sk, u16 txseq)
{
	struct bt_l2cap_control control;
	struct l2cap_pinfo *pi;
	u16 seq;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);

	pi = l2cap_pi(sk);
	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.super = L2CAP_SFRAME_SREJ;

	for (seq = pi->expected_tx_seq; seq != txseq;
		seq = __next_seq(seq, pi)) {
		/* Only request frames we do not already hold */
		if (!l2cap_ertm_seq_in_queue(SREJ_QUEUE(pi), seq)) {
			control.reqseq = seq;
			l2cap_ertm_send_sframe(sk, &control);
			l2cap_seq_list_append(&pi->srej_list, seq);
		}
	}

	pi->expected_tx_seq = __next_seq(txseq, pi);
}
2017
2018static void l2cap_ertm_send_srej_tail(struct sock *sk)
2019{
2020 struct bt_l2cap_control control;
2021 struct l2cap_pinfo *pi;
2022
2023 BT_DBG("sk %p", sk);
2024
2025 pi = l2cap_pi(sk);
2026
2027 if (pi->srej_list.tail == L2CAP_SEQ_LIST_CLEAR)
2028 return;
2029
2030 memset(&control, 0, sizeof(control));
2031 control.frame_type = 's';
2032 control.super = L2CAP_SFRAME_SREJ;
2033 control.reqseq = pi->srej_list.tail;
2034 l2cap_ertm_send_sframe(sk, &control);
2035}
2036
/* Re-send SREJs for every outstanding missing frame except @txseq.
 * Each sequence is popped, re-requested, and appended back to the list;
 * the initial head is captured so the rotating list is traversed at
 * most once.
 */
static void l2cap_ertm_send_srej_list(struct sock *sk, u16 txseq)
{
	struct bt_l2cap_control control;
	struct l2cap_pinfo *pi;
	u16 initial_head;
	u16 seq;

	BT_DBG("sk %p, txseq %d", sk, (int) txseq);

	pi = l2cap_pi(sk);
	memset(&control, 0, sizeof(control));
	control.frame_type = 's';
	control.super = L2CAP_SFRAME_SREJ;

	/* Capture initial list head to allow only one pass through the list. */
	initial_head = pi->srej_list.head;

	do {
		seq = l2cap_seq_list_pop(&pi->srej_list);
		/* txseq just arrived, so it no longer needs an SREJ */
		if ((seq == txseq) || (seq == L2CAP_SEQ_LIST_CLEAR))
			break;

		control.reqseq = seq;
		l2cap_ertm_send_sframe(sk, &control);
		l2cap_seq_list_append(&pi->srej_list, seq);
	} while (pi->srej_list.head != initial_head);
}
2064
2065static void l2cap_ertm_abort_rx_srej_sent(struct sock *sk)
2066{
2067 struct l2cap_pinfo *pi = l2cap_pi(sk);
2068 BT_DBG("sk %p", sk);
2069
2070 pi->expected_tx_seq = pi->buffer_seq;
2071 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
2072 skb_queue_purge(SREJ_QUEUE(sk));
2073 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2074}
2075
/* ERTM transmit state machine handler for the XMIT state (normal
 * transmission).  Handles queueing/sending data, entering and leaving
 * local-busy, processing acks, and transitioning to WAIT_F when a poll
 * is sent (explicitly or on retransmission timeout).
 *
 * Always returns 0; the int return matches the state-machine signature.
 */
static int l2cap_ertm_tx_state_xmit(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff_head *skbs, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_DATA_REQUEST:
		if (sk->sk_send_head == NULL)
			sk->sk_send_head = skb_peek(skbs);

		skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
		l2cap_ertm_send(sk);
		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
		BT_DBG("Enter LOCAL_BUSY");
		pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;

		if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
			/* The SREJ_SENT state must be aborted if we are to
			 * enter the LOCAL_BUSY state.
			 */
			l2cap_ertm_abort_rx_srej_sent(sk);
		}

		/* Sends RNR while locally busy */
		l2cap_ertm_send_ack(sk);

		break;
	case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
		BT_DBG("Exit LOCAL_BUSY");
		pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;

		/* If an AMP channel move was waiting for local-busy to
		 * clear, resume the move handshake now.
		 */
		if (pi->amp_move_state == L2CAP_AMP_STATE_WAIT_LOCAL_BUSY) {
			if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
						pi->amp_move_cmd_ident,
						pi->dcid,
						L2CAP_MOVE_CHAN_SUCCESS);
			}
			break;
		}

		/* An earlier RNR must be retracted with a poll so the peer
		 * resumes sending; wait for the F-bit in WAIT_F.
		 */
		if (pi->amp_move_role == L2CAP_AMP_MOVE_NONE &&
			(pi->conn_state & L2CAP_CONN_SENT_RNR)) {
			struct bt_l2cap_control local_control;

			memset(&local_control, 0, sizeof(local_control));
			local_control.frame_type = 's';
			local_control.super = L2CAP_SFRAME_RR;
			local_control.poll = 1;
			local_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &local_control);

			pi->retry_count = 1;
			l2cap_ertm_start_monitor_timer(pi);
			pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		l2cap_ertm_stop_ack_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES:
		/* Poll the peer instead of blindly retransmitting */
		l2cap_ertm_send_rr_or_rnr(sk, 1);
		pi->retry_count = 1;
		l2cap_ertm_start_monitor_timer(pi);
		pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
		break;
	case L2CAP_ERTM_EVENT_RECV_FBIT:
		/* Nothing to process */
		break;
	default:
		break;
	}

	return err;
}
2174
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002175static int l2cap_ertm_tx_state_wait_f(struct sock *sk,
2176 struct bt_l2cap_control *control,
2177 struct sk_buff_head *skbs, u8 event)
2178{
2179 struct l2cap_pinfo *pi;
2180 int err = 0;
2181
2182 BT_DBG("sk %p, control %p, skbs %p, event %d", sk, control, skbs,
2183 (int)event);
2184 pi = l2cap_pi(sk);
2185
2186 switch (event) {
2187 case L2CAP_ERTM_EVENT_DATA_REQUEST:
2188 if (sk->sk_send_head == NULL)
2189 sk->sk_send_head = skb_peek(skbs);
2190 /* Queue data, but don't send. */
2191 skb_queue_splice_tail_init(skbs, TX_QUEUE(sk));
2192 break;
2193 case L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED:
2194 BT_DBG("Enter LOCAL_BUSY");
2195 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
2196
2197 if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
2198 /* The SREJ_SENT state must be aborted if we are to
2199 * enter the LOCAL_BUSY state.
2200 */
2201 l2cap_ertm_abort_rx_srej_sent(sk);
2202 }
2203
2204 l2cap_ertm_send_ack(sk);
2205
2206 break;
2207 case L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR:
2208 BT_DBG("Exit LOCAL_BUSY");
2209 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
2210
2211 if (pi->conn_state & L2CAP_CONN_SENT_RNR) {
2212 struct bt_l2cap_control local_control;
2213 memset(&local_control, 0, sizeof(local_control));
2214 local_control.frame_type = 's';
2215 local_control.super = L2CAP_SFRAME_RR;
2216 local_control.poll = 1;
2217 local_control.reqseq = pi->buffer_seq;
2218 l2cap_ertm_send_sframe(sk, &local_control);
2219
2220 pi->retry_count = 1;
2221 l2cap_ertm_start_monitor_timer(pi);
2222 pi->tx_state = L2CAP_ERTM_TX_STATE_WAIT_F;
2223 }
2224 break;
2225 case L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT:
2226 l2cap_ertm_process_reqseq(sk, control->reqseq);
2227
2228 /* Fall through */
2229
2230 case L2CAP_ERTM_EVENT_RECV_FBIT:
2231 if (control && control->final) {
2232 l2cap_ertm_stop_monitor_timer(pi);
2233 if (pi->unacked_frames > 0)
2234 l2cap_ertm_start_retrans_timer(pi);
2235 pi->retry_count = 0;
2236 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2237 BT_DBG("recv fbit tx_state 0x2.2%x", pi->tx_state);
2238 }
2239 break;
2240 case L2CAP_ERTM_EVENT_EXPLICIT_POLL:
2241 /* Ignore */
2242 break;
2243 case L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES:
2244 if ((pi->max_tx == 0) || (pi->retry_count < pi->max_tx)) {
2245 l2cap_ertm_send_rr_or_rnr(sk, 1);
2246 l2cap_ertm_start_monitor_timer(pi);
2247 pi->retry_count += 1;
2248 } else
2249 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
2250 break;
2251 default:
2252 break;
2253 }
2254
2255 return err;
2256}
2257
2258int l2cap_ertm_tx(struct sock *sk, struct bt_l2cap_control *control,
2259 struct sk_buff_head *skbs, u8 event)
2260{
2261 struct l2cap_pinfo *pi;
2262 int err = 0;
2263
2264 BT_DBG("sk %p, control %p, skbs %p, event %d, state %d",
2265 sk, control, skbs, (int)event, l2cap_pi(sk)->tx_state);
2266
2267 pi = l2cap_pi(sk);
2268
2269 switch (pi->tx_state) {
2270 case L2CAP_ERTM_TX_STATE_XMIT:
2271 err = l2cap_ertm_tx_state_xmit(sk, control, skbs, event);
2272 break;
2273 case L2CAP_ERTM_TX_STATE_WAIT_F:
2274 err = l2cap_ertm_tx_state_wait_f(sk, control, skbs, event);
2275 break;
2276 default:
2277 /* Ignore event */
2278 break;
2279 }
2280
2281 return err;
2282}
2283
/* Segment an SDU of @len bytes from @msg into one or more I-frame PDUs
 * appended to @seg_queue.  The PDU size is derived from the HCI MTU
 * (bounded by the BR/EDR radio packet size when no AMP controller is in
 * use) and the remote MPS.  SAR fields are set to UNSEGMENTED for a
 * single PDU, or START/CONTINUE/END for a segmented SDU; the START PDU
 * also carries the SDU length.
 *
 * @reseg is passed through to the PDU builder to select non-blocking
 * allocation and kernel-space copies.  On failure the partially built
 * queue is purged and the PDU builder's error is returned; otherwise 0.
 */
int l2cap_segment_sdu(struct sock *sk, struct sk_buff_head* seg_queue,
			struct msghdr *msg, size_t len, int reseg)
{
	struct sk_buff *skb;
	u16 sdu_len;
	size_t pdu_len;
	int err = 0;
	u8 sar;

	BT_DBG("sk %p, msg %p, len %d", sk, msg, (int)len);

	/* It is critical that ERTM PDUs fit in a single HCI fragment,
	 * so fragmented skbs are not used.  The HCI layer's handling
	 * of fragmented skbs is not compatible with ERTM's queueing.
	 */

	/* PDU size is derived from the HCI MTU */
	pdu_len = l2cap_pi(sk)->conn->mtu;

	/* Constrain BR/EDR PDU size to fit within the largest radio packet */
	if (!l2cap_pi(sk)->ampcon)
		pdu_len = min_t(size_t, pdu_len, L2CAP_BREDR_MAX_PAYLOAD);

	/* Adjust for largest possible L2CAP overhead. */
	pdu_len -= L2CAP_EXTENDED_HDR_SIZE + L2CAP_FCS_SIZE;

	/* Remote device may have requested smaller PDUs */
	pdu_len = min_t(size_t, pdu_len, l2cap_pi(sk)->remote_mps);

	if (len <= pdu_len) {
		sar = L2CAP_SAR_UNSEGMENTED;
		sdu_len = 0;
		pdu_len = len;
	} else {
		sar = L2CAP_SAR_START;
		sdu_len = len;
		/* START PDU payload shrinks to make room for the SDU length */
		pdu_len -= L2CAP_SDULEN_SIZE;
	}

	while (len) {
		skb = l2cap_create_iframe_pdu(sk, msg, pdu_len, sdu_len, reseg);

		BT_DBG("iframe skb %p", skb);

		if (IS_ERR(skb)) {
			__skb_queue_purge(seg_queue);
			return PTR_ERR(skb);
		}

		bt_cb(skb)->control.sar = sar;
		__skb_queue_tail(seg_queue, skb);

		len -= pdu_len;
		if (sdu_len) {
			/* Only the first PDU carries the SDU length; later
			 * PDUs regain that space for payload.
			 */
			sdu_len = 0;
			pdu_len += L2CAP_SDULEN_SIZE;
		}

		if (len <= pdu_len) {
			sar = L2CAP_SAR_END;
			pdu_len = len;
		} else {
			sar = L2CAP_SAR_CONTINUE;
		}
	}

	return err;
}
2352
2353static inline int is_initial_frame(u8 sar)
2354{
2355 return (sar == L2CAP_SAR_UNSEGMENTED ||
2356 sar == L2CAP_SAR_START);
2357}
2358
2359static inline int l2cap_skbuff_to_kvec(struct sk_buff *skb, struct kvec *iv,
2360 size_t veclen)
2361{
2362 struct sk_buff *frag_iter;
2363
2364 BT_DBG("skb %p (len %d), iv %p", skb, (int)skb->len, iv);
2365
2366 if (iv->iov_len + skb->len > veclen)
2367 return -ENOMEM;
2368
2369 memcpy(iv->iov_base + iv->iov_len, skb->data, skb->len);
2370 iv->iov_len += skb->len;
2371
2372 skb_walk_frags(skb, frag_iter) {
2373 if (iv->iov_len + skb->len > veclen)
2374 return -ENOMEM;
2375
2376 BT_DBG("Copying %d bytes", (int)frag_iter->len);
2377 memcpy(iv->iov_base + iv->iov_len, frag_iter->data,
2378 frag_iter->len);
2379 iv->iov_len += frag_iter->len;
2380 }
2381
2382 return 0;
2383}
2384
/* Re-segment all SDUs on an ERTM transmit queue after an AMP move.
 *
 * Each run of queued PDUs is reassembled into a flat buffer (stripping
 * L2CAP headers, the SDU-length field of start frames, and any trailing
 * FCS), then re-segmented with l2cap_segment_sdu() using the channel's
 * current MTU/PDU settings.  When the original SDU was already partly
 * transmitted (first PDU is not an initial frame), the SAR bits of the
 * first rebuilt PDU are patched so the receiver's reassembly state
 * stays consistent.  On error the queue is purged and the error code
 * returned; returns 0 on success.  Caller must hold the socket lock.
 */
int l2cap_resegment_queue(struct sock *sk, struct sk_buff_head *queue)
{
	void *buf;
	int buflen;
	int err = 0;
	struct sk_buff *skb;
	struct msghdr msg;
	struct kvec iv;
	struct sk_buff_head old_frames;
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p", sk);

	if (skb_queue_empty(queue))
		return 0;

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = (struct iovec *) &iv;

	/* Largest possible reassembled SDU plus FCS trailer. */
	buflen = pi->omtu + L2CAP_FCS_SIZE;
	buf = kzalloc(buflen, GFP_TEMPORARY);

	if (!buf) {
		BT_DBG("Could not allocate resegmentation buffer");
		return -ENOMEM;
	}

	/* Move current frames off the original queue */
	__skb_queue_head_init(&old_frames);
	skb_queue_splice_tail_init(queue, &old_frames);

	while (!skb_queue_empty(&old_frames)) {
		struct sk_buff_head current_sdu;
		u8 original_sar;

		/* Reassemble each SDU from one or more PDUs */

		iv.iov_base = buf;
		iv.iov_len = 0;

		skb = skb_peek(&old_frames);
		original_sar = bt_cb(skb)->control.sar;

		__skb_unlink(skb, &old_frames);

		/* Append data to SDU */
		if (pi->extended_control)
			skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
		else
			skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

		if (original_sar == L2CAP_SAR_START)
			skb_pull(skb, L2CAP_SDULEN_SIZE);

		err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

		/* Drop the FCS trailer that was copied along with the data. */
		if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
			iv.iov_len -= L2CAP_FCS_SIZE;

		/* Free skb */
		kfree_skb(skb);

		if (err)
			break;

		/* Pull in continuation/end PDUs until the next initial
		 * frame (which starts a different SDU).
		 */
		while (!skb_queue_empty(&old_frames) && !err) {
			/* Check next frame */
			skb = skb_peek(&old_frames);

			if (is_initial_frame(bt_cb(skb)->control.sar))
				break;

			__skb_unlink(skb, &old_frames);

			/* Append data to SDU */
			if (pi->extended_control)
				skb_pull(skb, L2CAP_EXTENDED_HDR_SIZE);
			else
				skb_pull(skb, L2CAP_ENHANCED_HDR_SIZE);

			if (bt_cb(skb)->control.sar == L2CAP_SAR_START)
				skb_pull(skb, L2CAP_SDULEN_SIZE);

			err = l2cap_skbuff_to_kvec(skb, &iv, buflen);

			if (bt_cb(skb)->control.fcs == L2CAP_FCS_CRC16)
				iv.iov_len -= L2CAP_FCS_SIZE;

			/* Free skb */
			kfree_skb(skb);
		}

		if (err)
			break;

		/* Segment data */

		__skb_queue_head_init(&current_sdu);

		/* skbs for the SDU were just freed, but the
		 * resegmenting process could produce more, smaller
		 * skbs due to smaller PDUs and reduced HCI MTU.  The
		 * overhead from the sk_buff structs could put us over
		 * the sk_sndbuf limit.
		 *
		 * Since this code is running in response to a
		 * received poll/final packet, it cannot block.
		 * Therefore, memory allocation needs to be allowed by
		 * falling back to bt_skb_alloc() (with
		 * skb_set_owner_w() to maintain sk_wmem_alloc
		 * correctly).
		 */
		/* msg_iovlen carries the byte count of the single kvec
		 * here; the same value is passed as the explicit length
		 * argument below.
		 */
		msg.msg_iovlen = iv.iov_len;
		err = l2cap_segment_sdu(sk, &current_sdu, &msg,
					msg.msg_iovlen, 1);

		if (err || skb_queue_empty(&current_sdu)) {
			BT_DBG("Error %d resegmenting data for socket %p",
				err, sk);
			__skb_queue_purge(&current_sdu);
			break;
		}

		/* Fix up first PDU SAR bits */
		if (!is_initial_frame(original_sar)) {
			BT_DBG("Changing SAR bits, %d PDUs",
				skb_queue_len(&current_sdu));
			skb = skb_peek(&current_sdu);

			if (skb_queue_len(&current_sdu) == 1) {
				/* Change SAR from 'unsegmented' to 'end' */
				bt_cb(skb)->control.sar = L2CAP_SAR_END;
			} else {
				struct l2cap_hdr *lh;
				size_t hdrlen;

				/* Change SAR from 'start' to 'continue' */
				bt_cb(skb)->control.sar = L2CAP_SAR_CONTINUE;

				/* Start frames contain 2 bytes for
				 * sdulen and continue frames don't.
				 * Must rewrite header to eliminate
				 * sdulen and then adjust l2cap frame
				 * length.
				 */
				if (pi->extended_control)
					hdrlen = L2CAP_EXTENDED_HDR_SIZE;
				else
					hdrlen = L2CAP_ENHANCED_HDR_SIZE;

				memmove(skb->data + L2CAP_SDULEN_SIZE,
					skb->data, hdrlen);
				skb_pull(skb, L2CAP_SDULEN_SIZE);
				lh = (struct l2cap_hdr *)skb->data;
				lh->len = cpu_to_le16(le16_to_cpu(lh->len) -
						L2CAP_SDULEN_SIZE);
			}
		}

		/* Add to queue */
		skb_queue_splice_tail(&current_sdu, queue);
	}

	__skb_queue_purge(&old_frames);
	if (err)
		__skb_queue_purge(queue);

	kfree(buf);

	BT_DBG("Queue resegmented, err=%d", err);
	return err;
}
2557
/* Deferred work handler for AMP channel-move resegmentation.
 *
 * Runs on the L2CAP workqueue.  Rebuilds the channel's transmit queue
 * for the new link via l2cap_resegment_queue(), then resumes ERTM
 * transmission, or tears the channel down on failure.  The work item
 * is freed up front; only the socket pointer is retained.
 */
static void l2cap_resegment_worker(struct work_struct *work)
{
	int err = 0;
	struct l2cap_resegment_work *seg_work =
		container_of(work, struct l2cap_resegment_work, work);
	struct sock *sk = seg_work->sk;

	/* The work struct is no longer needed once sk is extracted. */
	kfree(seg_work);

	BT_DBG("sk %p", sk);
	lock_sock(sk);

	/* The move may have been aborted while this work was pending;
	 * only proceed if the channel still expects resegmentation.
	 */
	if (l2cap_pi(sk)->amp_move_state != L2CAP_AMP_STATE_RESEGMENT) {
		release_sock(sk);
		return;
	}

	err = l2cap_resegment_queue(sk, TX_QUEUE(sk));

	l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;

	/* Restart transmission from the head of the rebuilt queue. */
	if (skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = NULL;
	else
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));

	if (err)
		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
	else
		l2cap_ertm_send(sk);

	release_sock(sk);
}
2591
2592static int l2cap_setup_resegment(struct sock *sk)
2593{
2594 struct l2cap_resegment_work *seg_work;
2595
2596 BT_DBG("sk %p", sk);
2597
2598 if (skb_queue_empty(TX_QUEUE(sk)))
2599 return 0;
2600
2601 seg_work = kzalloc(sizeof(*seg_work), GFP_ATOMIC);
2602 if (!seg_work)
2603 return -ENOMEM;
2604
2605 INIT_WORK(&seg_work->work, l2cap_resegment_worker);
2606 seg_work->sk = sk;
2607
2608 if (!queue_work(_l2cap_wq, &seg_work->work)) {
2609 kfree(seg_work);
2610 return -ENOMEM;
2611 }
2612
2613 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_RESEGMENT;
2614
2615 return 0;
2616}
2617
2618static inline int l2cap_rmem_available(struct sock *sk)
2619{
2620 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2621 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2622 return atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf / 3;
2623}
2624
2625static inline int l2cap_rmem_full(struct sock *sk)
2626{
2627 BT_DBG("sk_rmem_alloc %d, sk_rcvbuf %d",
2628 atomic_read(&sk->sk_rmem_alloc), sk->sk_rcvbuf);
2629 return atomic_read(&sk->sk_rmem_alloc) > (2 * sk->sk_rcvbuf) / 3;
2630}
2631
2632void l2cap_amp_move_init(struct sock *sk)
2633{
2634 BT_DBG("sk %p", sk);
2635
2636 if (!l2cap_pi(sk)->conn)
2637 return;
2638
2639 if (!(l2cap_pi(sk)->conn->fc_mask & L2CAP_FC_A2MP))
2640 return;
2641
2642 if (l2cap_pi(sk)->amp_id == 0) {
2643 if (l2cap_pi(sk)->amp_pref != BT_AMP_POLICY_PREFER_AMP)
2644 return;
2645 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2646 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
2647 amp_create_physical(l2cap_pi(sk)->conn, sk);
2648 } else {
2649 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_INITIATOR;
2650 l2cap_pi(sk)->amp_move_state =
2651 L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
2652 l2cap_pi(sk)->amp_move_id = 0;
2653 l2cap_amp_move_setup(sk);
2654 l2cap_send_move_chan_req(l2cap_pi(sk)->conn,
2655 l2cap_pi(sk), l2cap_pi(sk)->scid, 0);
2656 l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
2657 }
2658}
2659
2660static void l2cap_chan_ready(struct sock *sk)
2661{
2662 struct sock *parent = bt_sk(sk)->parent;
2663
2664 BT_DBG("sk %p, parent %p", sk, parent);
2665
2666 l2cap_pi(sk)->conf_state = 0;
2667 l2cap_sock_clear_timer(sk);
2668
2669 if (!parent) {
2670 /* Outgoing channel.
2671 * Wake up socket sleeping on connect.
2672 */
2673 sk->sk_state = BT_CONNECTED;
2674 sk->sk_state_change(sk);
2675 } else {
2676 /* Incoming channel.
2677 * Wake up socket sleeping on accept.
2678 */
2679 parent->sk_data_ready(parent, 0);
2680 }
2681}
2682
Linus Torvalds1da177e2005-04-16 15:20:36 -07002683/* Copy frame to all raw sockets on that connection */
2684static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2685{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002686 struct l2cap_chan_list *l = &conn->chan_list;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002687 struct sk_buff *nskb;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002688 struct sock *sk;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002689
2690 BT_DBG("conn %p", conn);
2691
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002692 read_lock(&l->lock);
2693 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2694 if (sk->sk_type != SOCK_RAW)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002695 continue;
2696
2697 /* Don't send frame to the socket it came from */
2698 if (skb->sk == sk)
2699 continue;
Gustavo F. Padovanaf05b302009-04-20 01:31:08 -03002700 nskb = skb_clone(skb, GFP_ATOMIC);
2701 if (!nskb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002702 continue;
2703
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002704 if (sock_queue_rcv_skb(sk, nskb))
Linus Torvalds1da177e2005-04-16 15:20:36 -07002705 kfree_skb(nskb);
2706 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002707 read_unlock(&l->lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002708}
2709
2710/* ---- L2CAP signalling commands ---- */
/* Build an L2CAP signalling command PDU.
 *
 * Allocates an skb containing the L2CAP header (CID chosen by link
 * type: LE vs. BR/EDR signalling channel), the command header, and
 * @dlen bytes of @data.  Payload beyond the controller's ACL MTU is
 * placed in header-less continuation fragments chained on frag_list.
 * Returns the skb, or NULL on allocation failure (all fragments are
 * freed on the failure path).
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
		u8 code, u8 ident, u16 dlen, void *data)
{
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;
	int len, count;
	unsigned int mtu = conn->hcon->hdev->acl_mtu;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	/* Total wire length; first fragment is capped at the ACL MTU. */
	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);
	if (!skb)
		return NULL;

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);

	if (conn->hcon->type == LE_LINK)
		lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
	else
		lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->code = code;
	cmd->ident = ident;
	cmd->len = cpu_to_le16(dlen);

	if (dlen) {
		/* Fill the rest of the first fragment with payload. */
		count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
		memcpy(skb_put(skb, count), data, count);
		data += count;
	}

	len -= skb->len;

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
	while (len) {
		count = min_t(unsigned int, mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);
		if (!*frag)
			goto fail;

		memcpy(skb_put(*frag, count), data, count);

		len -= count;
		data += count;

		frag = &(*frag)->next;
	}

	return skb;

fail:
	/* Frees the head skb and every fragment chained so far. */
	kfree_skb(skb);
	return NULL;
}
2774
2775static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2776{
2777 struct l2cap_conf_opt *opt = *ptr;
2778 int len;
2779
2780 len = L2CAP_CONF_OPT_SIZE + opt->len;
2781 *ptr += len;
2782
2783 *type = opt->type;
2784 *olen = opt->len;
2785
2786 switch (opt->len) {
2787 case 1:
2788 *val = *((u8 *) opt->val);
2789 break;
2790
2791 case 2:
steven miaobfaaeb32010-10-16 18:29:47 -04002792 *val = get_unaligned_le16(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002793 break;
2794
2795 case 4:
steven miaobfaaeb32010-10-16 18:29:47 -04002796 *val = get_unaligned_le32(opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002797 break;
2798
2799 default:
2800 *val = (unsigned long) opt->val;
2801 break;
2802 }
2803
2804 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2805 return len;
2806}
2807
Linus Torvalds1da177e2005-04-16 15:20:36 -07002808static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2809{
2810 struct l2cap_conf_opt *opt = *ptr;
2811
2812 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2813
2814 opt->type = type;
2815 opt->len = len;
2816
2817 switch (len) {
2818 case 1:
2819 *((u8 *) opt->val) = val;
2820 break;
2821
2822 case 2:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002823 put_unaligned_le16(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002824 break;
2825
2826 case 4:
Gustavo F. Padovan4f8b6912010-10-18 14:25:53 -02002827 put_unaligned_le32(val, opt->val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002828 break;
2829
2830 default:
2831 memcpy(opt->val, (void *) val, len);
2832 break;
2833 }
2834
2835 *ptr += L2CAP_CONF_OPT_SIZE + len;
2836}
2837
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002838static void l2cap_ertm_ack_timeout(struct work_struct *work)
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002839{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002840 struct delayed_work *delayed =
2841 container_of(work, struct delayed_work, work);
2842 struct l2cap_pinfo *pi =
2843 container_of(delayed, struct l2cap_pinfo, ack_work);
2844 struct sock *sk = (struct sock *)pi;
2845 u16 frames_to_ack;
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002846
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002847 BT_DBG("sk %p", sk);
2848
2849 if (!sk)
2850 return;
2851
2852 lock_sock(sk);
2853
2854 if (!l2cap_pi(sk)->conn) {
2855 release_sock(sk);
2856 return;
2857 }
2858
2859 frames_to_ack = __delta_seq(l2cap_pi(sk)->buffer_seq,
2860 l2cap_pi(sk)->last_acked_seq,
2861 l2cap_pi(sk));
2862
2863 if (frames_to_ack)
2864 l2cap_ertm_send_rr_or_rnr(sk, 0);
2865
2866 release_sock(sk);
Gustavo F. Padovanc1b4f432010-05-01 16:15:39 -03002867}
2868
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002869static void l2cap_ertm_retrans_timeout(struct work_struct *work)
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002870{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002871 struct delayed_work *delayed =
2872 container_of(work, struct delayed_work, work);
2873 struct l2cap_pinfo *pi =
2874 container_of(delayed, struct l2cap_pinfo, retrans_work);
2875 struct sock *sk = (struct sock *)pi;
Gustavo F. Padovan525cd182011-03-25 19:43:39 -03002876
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002877 BT_DBG("sk %p", sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002878
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002879 if (!sk)
2880 return;
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002881
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002882 lock_sock(sk);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03002883
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002884 if (!l2cap_pi(sk)->conn) {
2885 release_sock(sk);
2886 return;
2887 }
Gustavo F. Padovan39d5a3e2011-04-04 15:40:12 -03002888
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002889 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_RETRANS_TIMER_EXPIRES);
2890 release_sock(sk);
2891}
Gustavo F. Padovan218bb9d2010-06-21 18:53:22 -03002892
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07002893static void l2cap_ertm_monitor_timeout(struct work_struct *work)
2894{
2895 struct delayed_work *delayed =
2896 container_of(work, struct delayed_work, work);
2897 struct l2cap_pinfo *pi =
2898 container_of(delayed, struct l2cap_pinfo, monitor_work);
2899 struct sock *sk = (struct sock *)pi;
2900
2901 BT_DBG("sk %p", sk);
2902
2903 if (!sk)
2904 return;
2905
2906 lock_sock(sk);
2907
2908 if (!l2cap_pi(sk)->conn) {
2909 release_sock(sk);
2910 return;
2911 }
2912
2913 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_MONITOR_TIMER_EXPIRES);
2914
2915 release_sock(sk);
2916}
2917
2918static inline void l2cap_ertm_init(struct sock *sk)
2919{
2920 l2cap_pi(sk)->next_tx_seq = 0;
2921 l2cap_pi(sk)->expected_tx_seq = 0;
2922 l2cap_pi(sk)->expected_ack_seq = 0;
2923 l2cap_pi(sk)->unacked_frames = 0;
2924 l2cap_pi(sk)->buffer_seq = 0;
2925 l2cap_pi(sk)->frames_sent = 0;
2926 l2cap_pi(sk)->last_acked_seq = 0;
2927 l2cap_pi(sk)->sdu = NULL;
2928 l2cap_pi(sk)->sdu_last_frag = NULL;
2929 l2cap_pi(sk)->sdu_len = 0;
2930 atomic_set(&l2cap_pi(sk)->ertm_queued, 0);
2931
2932 l2cap_pi(sk)->rx_state = L2CAP_ERTM_RX_STATE_RECV;
2933 l2cap_pi(sk)->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
2934
2935 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", l2cap_pi(sk)->tx_state,
2936 l2cap_pi(sk)->rx_state);
2937
2938 l2cap_pi(sk)->amp_id = 0;
2939 l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
2940 l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
2941 l2cap_pi(sk)->amp_move_reqseq = 0;
2942 l2cap_pi(sk)->amp_move_event = 0;
2943
2944 INIT_DELAYED_WORK(&l2cap_pi(sk)->ack_work, l2cap_ertm_ack_timeout);
2945 INIT_DELAYED_WORK(&l2cap_pi(sk)->retrans_work,
2946 l2cap_ertm_retrans_timeout);
2947 INIT_DELAYED_WORK(&l2cap_pi(sk)->monitor_work,
2948 l2cap_ertm_monitor_timeout);
2949 INIT_WORK(&l2cap_pi(sk)->tx_work, l2cap_ertm_tx_worker);
2950 skb_queue_head_init(SREJ_QUEUE(sk));
2951 skb_queue_head_init(TX_QUEUE(sk));
2952
2953 l2cap_seq_list_init(&l2cap_pi(sk)->srej_list, l2cap_pi(sk)->tx_win);
2954 l2cap_seq_list_init(&l2cap_pi(sk)->retrans_list,
2955 l2cap_pi(sk)->remote_tx_win);
2956}
2957
2958void l2cap_ertm_destruct(struct sock *sk)
2959{
2960 l2cap_seq_list_free(&l2cap_pi(sk)->srej_list);
2961 l2cap_seq_list_free(&l2cap_pi(sk)->retrans_list);
2962}
2963
/* Cancel every outstanding ERTM timer for this channel. */
void l2cap_ertm_shutdown(struct sock *sk)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);
}
2970
2971void l2cap_ertm_recv_done(struct sock *sk)
2972{
2973 lock_sock(sk);
2974
2975 if (l2cap_pi(sk)->mode != L2CAP_MODE_ERTM) {
2976 release_sock(sk);
2977 return;
2978 }
2979
2980 /* Consume any queued incoming frames and update local busy status */
2981 if (l2cap_pi(sk)->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT &&
2982 l2cap_ertm_rx_queued_iframes(sk))
2983 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNRESET);
2984 else if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
2985 l2cap_rmem_available(sk))
2986 l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);
2987
2988 release_sock(sk);
Gustavo F. Padovan0565c1c2009-10-03 02:34:36 -03002989}
2990
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03002991static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2992{
2993 switch (mode) {
2994 case L2CAP_MODE_STREAMING:
2995 case L2CAP_MODE_ERTM:
2996 if (l2cap_mode_supported(mode, remote_feat_mask))
2997 return mode;
2998 /* fall through */
2999 default:
3000 return L2CAP_MODE_BASIC;
3001 }
3002}
3003
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003004static void l2cap_setup_txwin(struct l2cap_pinfo *pi)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003005{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003006 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED &&
3007 (pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW)) {
3008 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3009 pi->extended_control = 1;
3010 } else {
3011 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3012 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3013
3014 pi->tx_win_max = L2CAP_TX_WIN_MAX_ENHANCED;
3015 pi->extended_control = 0;
3016 }
3017}
3018
/* Combine a new flow spec into a channel's current aggregate.
 *
 * 0xFFFF max_sdu / 0xFFFFFFFF sdu_arr_time act as "unknown rate"
 * sentinels.  If either side's rate is unknown the aggregate is
 * unknown; otherwise the aggregate arrival time is recomputed from
 * the summed byte rates (bytes/sec, scaled by 1000000 for integer
 * division).
 *
 * NOTE(review): agg->max_sdu is copied from *cur and not combined
 * with new->max_sdu before the arrival-time calculation — confirm
 * this is intended.  Also, if both rates compute to zero the
 * div64_u64() divisor is zero; confirm callers exclude that case.
 */
static void l2cap_aggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *new,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		/* current flow spec has known rate */
		if ((new->max_sdu == 0xFFFF) ||
				(new->sdu_arr_time == 0xFFFFFFFF)) {
			/* new fs has unknown rate, so aggregate is unknown */
			agg->max_sdu = 0xFFFF;
			agg->sdu_arr_time = 0xFFFFFFFF;
		} else {
			/* new fs has known rate, so aggregate is known */
			u64 cur_rate;
			u64 new_rate;
			cur_rate = cur->max_sdu * 1000000ULL;
			if (cur->sdu_arr_time)
				cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
			new_rate = new->max_sdu * 1000000ULL;
			if (new->sdu_arr_time)
				new_rate = div_u64(new_rate, new->sdu_arr_time);
			cur_rate = cur_rate + new_rate;
			agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
				cur_rate);
		}
	}
}
3047
3048static int l2cap_aggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3049{
3050 struct hci_ext_fs tx_fs;
3051 struct hci_ext_fs rx_fs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003052
Gustavo F. Padovan49208c92011-04-04 15:59:54 -03003053 BT_DBG("chan %p", chan);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003054
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003055 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3056 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3057 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3058 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3059 return 0;
3060
3061 l2cap_aggregate_fs(&chan->tx_fs,
3062 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3063 l2cap_aggregate_fs(&chan->rx_fs,
3064 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3065 hci_chan_modify(chan, &tx_fs, &rx_fs);
3066 return 1;
3067}
3068
/* Remove an old flow spec's contribution from a channel's aggregate.
 *
 * Starts from *cur; when the current rate is known (max_sdu and
 * sdu_arr_time are not the 0xFFFF/0xFFFFFFFF "unknown" sentinels),
 * the aggregate SDU arrival time is recomputed from the current rate
 * minus the old flow spec's rate (bytes/sec, scaled by 1000000 for
 * integer division).
 *
 * NOTE(review): if old_rate equals cur_rate, the div64_u64() divisor
 * below becomes zero — confirm callers never remove the last
 * contributing flow spec through this path.
 */
static void l2cap_deaggregate_fs(struct hci_ext_fs *cur,
		struct hci_ext_fs *old,
		struct hci_ext_fs *agg)
{
	*agg = *cur;
	if ((cur->max_sdu != 0xFFFF) && (cur->sdu_arr_time != 0xFFFFFFFF)) {
		u64 cur_rate;
		u64 old_rate;
		cur_rate = cur->max_sdu * 1000000ULL;
		if (cur->sdu_arr_time)
			cur_rate = div_u64(cur_rate, cur->sdu_arr_time);
		old_rate = old->max_sdu * 1000000ULL;
		if (old->sdu_arr_time)
			old_rate = div_u64(old_rate, old->sdu_arr_time);
		cur_rate = cur_rate - old_rate;
		agg->sdu_arr_time = div64_u64(agg->max_sdu * 1000000ULL,
			cur_rate);
	}
}
3088
3089static int l2cap_deaggregate(struct hci_chan *chan, struct l2cap_pinfo *pi)
3090{
3091 struct hci_ext_fs tx_fs;
3092 struct hci_ext_fs rx_fs;
3093
3094 BT_DBG("chan %p", chan);
3095
3096 if (((chan->tx_fs.max_sdu == 0xFFFF) ||
3097 (chan->tx_fs.sdu_arr_time == 0xFFFFFFFF)) &&
3098 ((chan->rx_fs.max_sdu == 0xFFFF) ||
3099 (chan->rx_fs.sdu_arr_time == 0xFFFFFFFF)))
3100 return 0;
3101
3102 l2cap_deaggregate_fs(&chan->tx_fs,
3103 (struct hci_ext_fs *) &pi->local_fs, &tx_fs);
3104 l2cap_deaggregate_fs(&chan->rx_fs,
3105 (struct hci_ext_fs *) &pi->remote_fs, &rx_fs);
3106 hci_chan_modify(chan, &tx_fs, &rx_fs);
3107 return 1;
3108}
3109
3110static struct hci_chan *l2cap_chan_admit(u8 amp_id, struct l2cap_pinfo *pi)
3111{
3112 struct hci_dev *hdev;
3113 struct hci_conn *hcon;
3114 struct hci_chan *chan;
3115
3116 hdev = hci_dev_get(A2MP_HCI_ID(amp_id));
3117 if (!hdev)
3118 return NULL;
3119
3120 BT_DBG("hdev %s", hdev->name);
3121
3122 hcon = hci_conn_hash_lookup_ba(hdev, ACL_LINK, pi->conn->dst);
3123 if (!hcon)
3124 return NULL;
3125
3126 chan = hci_chan_list_lookup_id(hdev, hcon->handle);
3127 if (chan) {
3128 l2cap_aggregate(chan, pi);
3129 hci_chan_hold(chan);
3130 return chan;
3131 }
3132
3133 if (bt_sk(pi)->parent) {
3134 /* Incoming connection */
3135 chan = hci_chan_accept(hcon,
3136 (struct hci_ext_fs *) &pi->local_fs,
3137 (struct hci_ext_fs *) &pi->remote_fs);
3138 } else {
3139 /* Outgoing connection */
3140 chan = hci_chan_create(hcon,
3141 (struct hci_ext_fs *) &pi->local_fs,
3142 (struct hci_ext_fs *) &pi->remote_fs);
3143 }
3144 return chan;
3145}
3146
/* Build an L2CAP configuration request for this channel into @data.
 *
 * On the first request the channel mode may be downgraded via
 * l2cap_select_mode() if the remote lacks ERTM/streaming support.
 * Options emitted depend on the resulting mode: MTU (when non-default),
 * RFC, and for ERTM/streaming optionally extended window, extended
 * flow spec (AMP channels) and FCS.  Returns the number of bytes
 * written, for use as the command's data length.
 */
int l2cap_build_conf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Only (re)select the mode on the very first exchange. */
	if (pi->num_conf_req || pi->num_conf_rsp)
		goto done;

	switch (pi->mode) {
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		/* Mode fixed by device policy: keep it as-is. */
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
			break;

		/* fall through */
	default:
		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
		break;
	}

done:
	if (pi->imtu != L2CAP_DEFAULT_MTU)
		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* Only send an RFC option if the remote knows about
		 * retransmission/streaming modes at all.
		 */
		if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
				!(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
			break;

		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				(unsigned long) &rfc);
		break;

	case L2CAP_MODE_ERTM:
		l2cap_setup_txwin(pi);
		/* The RFC option field is capped at the enhanced window
		 * size; larger windows go in the extended window option.
		 */
		if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
			rfc.txwin_size = L2CAP_TX_WIN_MAX_ENHANCED;
		else
			rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
		rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->tx_win);
		}

		if (pi->amp_id) {
			/* default best effort extended flow spec */
			struct l2cap_conf_ext_fs fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
					sizeof(fs), (unsigned long) &fs);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;

	case L2CAP_MODE_STREAMING:
		rfc.txwin_size = 0;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
				(unsigned long) &rfc);

		if ((pi->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW) &&
				pi->extended_control) {
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2, 0);
		}

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
			break;

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
		}
		break;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3264
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003265
/* Build a configuration request used to reconfigure an ERTM channel
 * after an AMP move.
 *
 * Only ERTM channels can be reconfigured this way; any other mode
 * returns -ECONNREFUSED.  When moving to an AMP controller
 * (amp_move_id set), the retransmission and monitor timeouts are
 * derived from the controller's best-effort flush timeout; moving
 * back to BR/EDR restores the defaults.  Returns the number of bytes
 * written into @data.
 */
static int l2cap_build_amp_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;
	u32 be_flush_to;

	BT_DBG("sk %p", sk);

	/* convert to milliseconds, round up */
	be_flush_to = (pi->conn->hcon->hdev->amp_be_flush_to + 999) / 1000;

	switch (pi->mode) {
	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		if (pi->amp_move_id) {
			/* Scale timeouts to the AMP flush timeout. */
			rfc.retrans_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
			rfc.monitor_timeout =
					cpu_to_le16((3 * be_flush_to) + 500);
		} else {
			rfc.retrans_timeout =
					cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
			rfc.monitor_timeout =
					cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
		}
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->imtu)
			rfc.max_pdu_size = cpu_to_le16(pi->imtu);

		break;

	default:
		return -ECONNREFUSED;
	}

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	if (pi->conn->feat_mask & L2CAP_FEAT_FCS) {

		/* TODO assign fcs for br/edr based on socket config option */
		/* AMP links use no FCS; BR/EDR uses CRC16. */
		if (pi->amp_move_id)
			pi->local_conf.fcs = L2CAP_FCS_NONE;
		else
			pi->local_conf.fcs = L2CAP_FCS_CRC16;

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1,
						pi->local_conf.fcs);

		pi->fcs = pi->local_conf.fcs | pi->remote_conf.fcs;
	}

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);

	return ptr - data;
}
3327
3328static int l2cap_parse_conf_req(struct sock *sk, void *data)
3329{
3330 struct l2cap_pinfo *pi = l2cap_pi(sk);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003331 struct l2cap_conf_rsp *rsp = data;
3332 void *ptr = rsp->data;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003333 void *req = pi->conf_req;
3334 int len = pi->conf_len;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003335 int type, hint, olen;
3336 unsigned long val;
Marcel Holtmann6464f352007-10-20 13:39:51 +02003337 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003338 struct l2cap_conf_ext_fs fs;
Marcel Holtmann861d6882007-10-20 13:37:06 +02003339 u16 mtu = L2CAP_DEFAULT_MTU;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003340 u16 result = L2CAP_CONF_SUCCESS;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003341
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003342 BT_DBG("sk %p", sk);
Marcel Holtmann820ae1b2006-11-18 22:15:00 +01003343
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003344 while (len >= L2CAP_CONF_OPT_SIZE) {
3345 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003346
Gustavo F. Padovan589d2742009-04-20 01:31:07 -03003347 hint = type & L2CAP_CONF_HINT;
Marcel Holtmann47ec1dcd2009-05-02 18:57:55 -07003348 type &= L2CAP_CONF_MASK;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003349
3350 switch (type) {
3351 case L2CAP_CONF_MTU:
Marcel Holtmann861d6882007-10-20 13:37:06 +02003352 mtu = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003353 break;
3354
3355 case L2CAP_CONF_FLUSH_TO:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003356 pi->flush_to = val;
3357 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3358 result = L2CAP_CONF_UNACCEPT;
3359 else
3360 pi->remote_conf.flush_to = val;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003361 break;
3362
3363 case L2CAP_CONF_QOS:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003364 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3365 result = L2CAP_CONF_UNACCEPT;
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003366 break;
3367
Marcel Holtmann6464f352007-10-20 13:39:51 +02003368 case L2CAP_CONF_RFC:
3369 if (olen == sizeof(rfc))
3370 memcpy(&rfc, (void *) val, olen);
3371 break;
3372
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003373 case L2CAP_CONF_FCS:
3374 if (val == L2CAP_FCS_NONE)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003375 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
3376 pi->remote_conf.fcs = val;
3377 break;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003378
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003379 case L2CAP_CONF_EXT_FS:
3380 if (olen == sizeof(fs)) {
3381 pi->conf_state |= L2CAP_CONF_EFS_RECV;
3382 if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
3383 result = L2CAP_CONF_UNACCEPT;
3384 break;
3385 }
3386 memcpy(&fs, (void *) val, olen);
3387 if (fs.type != L2CAP_SERVICE_BEST_EFFORT) {
3388 result = L2CAP_CONF_FLOW_SPEC_REJECT;
3389 break;
3390 }
3391 pi->remote_conf.flush_to =
3392 le32_to_cpu(fs.flush_to);
3393 pi->remote_fs.id = fs.id;
3394 pi->remote_fs.type = fs.type;
3395 pi->remote_fs.max_sdu =
3396 le16_to_cpu(fs.max_sdu);
3397 pi->remote_fs.sdu_arr_time =
3398 le32_to_cpu(fs.sdu_arr_time);
3399 pi->remote_fs.acc_latency =
3400 le32_to_cpu(fs.acc_latency);
3401 pi->remote_fs.flush_to =
3402 le32_to_cpu(fs.flush_to);
3403 }
3404 break;
3405
3406 case L2CAP_CONF_EXT_WINDOW:
3407 pi->extended_control = 1;
3408 pi->remote_tx_win = val;
3409 pi->tx_win_max = L2CAP_TX_WIN_MAX_EXTENDED;
3410 pi->conf_state |= L2CAP_CONF_EXT_WIN_RECV;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03003411 break;
3412
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003413 default:
3414 if (hint)
3415 break;
3416
3417 result = L2CAP_CONF_UNKNOWN;
3418 *((u8 *) ptr++) = type;
3419 break;
3420 }
3421 }
3422
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003423 if (pi->num_conf_rsp || pi->num_conf_req > 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003424 goto done;
3425
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003426 switch (pi->mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003427 case L2CAP_MODE_STREAMING:
3428 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003429 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
3430 pi->mode = l2cap_select_mode(rfc.mode,
3431 pi->conn->feat_mask);
Gustavo F. Padovan85eb53c2010-06-03 18:43:28 -03003432 break;
3433 }
3434
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003435 if (pi->mode != rfc.mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003436 return -ECONNREFUSED;
Gustavo F. Padovan742e5192010-06-08 19:09:48 -03003437
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003438 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003439 }
3440
3441done:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003442 if (pi->mode != rfc.mode) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003443 result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003444 rfc.mode = pi->mode;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003445
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003446 if (pi->num_conf_rsp == 1)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003447 return -ECONNREFUSED;
3448
3449 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3450 sizeof(rfc), (unsigned long) &rfc);
3451 }
3452
3453
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003454 if ((pi->conf_state & L2CAP_CONF_LOCKSTEP) &&
3455 !(pi->conf_state & L2CAP_CONF_EFS_RECV))
3456 return -ECONNREFUSED;
3457
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003458 if (result == L2CAP_CONF_SUCCESS) {
3459 /* Configure output options and let the other side know
3460 * which ones we don't like. */
3461
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003462 if (mtu < L2CAP_DEFAULT_MIN_MTU) {
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003463 result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003464 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003465 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003466 else {
3467 pi->omtu = mtu;
3468 pi->conf_state |= L2CAP_CONF_MTU_DONE;
3469 }
3470 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003471
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003472 switch (rfc.mode) {
3473 case L2CAP_MODE_BASIC:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003474 pi->fcs = L2CAP_FCS_NONE;
3475 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003476 break;
3477
3478 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003479 if (!(pi->conf_state & L2CAP_CONF_EXT_WIN_RECV))
3480 pi->remote_tx_win = rfc.txwin_size;
Mat Martineau86b1b262010-08-05 15:54:22 -07003481
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003482 pi->remote_max_tx = rfc.max_transmit;
Gustavo F. Padovan1c762152010-05-01 16:15:40 -03003483
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003484 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003485
Gustavo F. Padovan10467e92010-05-01 16:15:40 -03003486 rfc.retrans_timeout =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003487 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
Gustavo F. Padovan10467e92010-05-01 16:15:40 -03003488 rfc.monitor_timeout =
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003489 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003490
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003491 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003492
3493 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3494 sizeof(rfc), (unsigned long) &rfc);
3495
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003496 if (pi->conf_state & L2CAP_CONF_LOCKSTEP)
3497 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_FS,
3498 sizeof(fs), (unsigned long) &fs);
3499
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003500 break;
3501
3502 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003503 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan1c762152010-05-01 16:15:40 -03003504
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003505 pi->conf_state |= L2CAP_CONF_MODE_DONE;
Gustavo F. Padovan68ae6632009-10-17 21:41:01 -03003506
3507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3508 sizeof(rfc), (unsigned long) &rfc);
3509
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003510 break;
3511
3512 default:
Marcel Holtmann6464f352007-10-20 13:39:51 +02003513 result = L2CAP_CONF_UNACCEPT;
3514
3515 memset(&rfc, 0, sizeof(rfc));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003516 rfc.mode = pi->mode;
3517 }
3518
3519 if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
3520 !(pi->conf_state & L2CAP_CONF_PEND_SENT)) {
3521 pi->conf_state |= L2CAP_CONF_PEND_SENT;
3522 result = L2CAP_CONF_PENDING;
3523
3524 if (pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND &&
3525 pi->amp_id) {
Peter Krystadf453bb32011-07-19 17:23:34 -07003526 struct hci_chan *chan;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003527 /* Trigger logical link creation only on AMP */
3528
Peter Krystadf453bb32011-07-19 17:23:34 -07003529 chan = l2cap_chan_admit(pi->amp_id, pi);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003530 if (!chan)
3531 return -ECONNREFUSED;
3532
3533 chan->l2cap_sk = sk;
3534 if (chan->state == BT_CONNECTED)
3535 l2cap_create_cfm(chan, 0);
3536 }
Marcel Holtmann6464f352007-10-20 13:39:51 +02003537 }
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003538
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003539 if (result == L2CAP_CONF_SUCCESS)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003540 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003541 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003542 rsp->scid = cpu_to_le16(pi->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003543 rsp->result = cpu_to_le16(result);
3544 rsp->flags = cpu_to_le16(0x0000);
3545
3546 return ptr - data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003547}
3548
/* Parse a Configure Request received while an AMP channel move is in
 * progress and build the Configure Response in @data.
 *
 * During a move only a limited reconfiguration is allowed: once a move
 * destination is set (pi->amp_move_id != 0), flush-timeout and QoS
 * changes are rejected; the MTU may not shrink and the extended
 * transmit window may not change.  Accepted RFC timeouts are staged in
 * pi->remote_conf and applied only when waiting for the final F-flag.
 *
 * Returns the number of bytes of response written to @data.
 */
static int l2cap_parse_amp_move_reconf_req(struct sock *sk, void *data)
{
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;
	/* Options accumulated from the peer's Configure Request */
	void *req = pi->conf_req;
	int len = pi->conf_len;
	int type, hint, olen;
	unsigned long val;
	struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
	struct l2cap_conf_ext_fs fs;
	u16 mtu = pi->omtu;
	u16 tx_win = pi->remote_tx_win;
	u16 result = L2CAP_CONF_SUCCESS;

	BT_DBG("sk %p", sk);

	/* Walk every option in the request and validate it. */
	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&req, &type, &olen, &val);

		hint = type & L2CAP_CONF_HINT;	/* hints are never rejected */
		type &= L2CAP_CONF_MASK;

		switch (type) {
		case L2CAP_CONF_MTU:
			mtu = val;
			break;

		case L2CAP_CONF_FLUSH_TO:
			/* Flush timeout may not be renegotiated mid-move */
			if (pi->amp_move_id)
				result = L2CAP_CONF_UNACCEPT;
			else
				pi->remote_conf.flush_to = val;
			break;

		case L2CAP_CONF_QOS:
			/* QoS option is likewise refused mid-move */
			if (pi->amp_move_id)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *) val, olen);
			/* Mode changes (or falling back to Basic) are not
			 * allowed during reconfiguration */
			if (pi->mode != rfc.mode ||
					rfc.mode == L2CAP_MODE_BASIC)
				result = L2CAP_CONF_UNACCEPT;
			break;

		case L2CAP_CONF_FCS:
			pi->remote_conf.fcs = val;
			break;

		case L2CAP_CONF_EXT_FS:
			if (olen == sizeof(fs)) {
				memcpy(&fs, (void *) val, olen);
				/* Only best-effort flow specs are supported */
				if (fs.type != L2CAP_SERVICE_BEST_EFFORT)
					result = L2CAP_CONF_FLOW_SPEC_REJECT;
				else {
					pi->remote_conf.flush_to =
						le32_to_cpu(fs.flush_to);
				}
			}
			break;

		case L2CAP_CONF_EXT_WINDOW:
			tx_win = val;
			break;

		default:
			if (hint)
				break;

			/* Echo unknown non-hint option types back to the
			 * peer in the response payload */
			result = L2CAP_CONF_UNKNOWN;
			*((u8 *) ptr++) = type;
			break;
		}
	}

	BT_DBG("result 0x%2.2x cur mode 0x%2.2x req mode 0x%2.2x",
		result, pi->mode, rfc.mode);

	if (result == L2CAP_CONF_SUCCESS) {
		/* Configure output options and let the other side know
		 * which ones we don't like. */

		/* Don't allow mtu to decrease. */
		if (mtu < pi->omtu)
			result = L2CAP_CONF_UNACCEPT;

		BT_DBG("mtu %d omtu %d", mtu, pi->omtu);

		l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		/* Don't allow extended transmit window to change. */
		if (tx_win != pi->remote_tx_win) {
			result = L2CAP_CONF_UNACCEPT;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW, 2,
					pi->remote_tx_win);
		}

		if (rfc.mode == L2CAP_MODE_ERTM) {
			/* Stage the peer's ERTM timeouts; applied below
			 * only while waiting for the F-flag */
			pi->remote_conf.retrans_timeout =
				le16_to_cpu(rfc.retrans_timeout);
			pi->remote_conf.monitor_timeout =
				le16_to_cpu(rfc.monitor_timeout);

			BT_DBG("remote conf monitor timeout %d",
					pi->remote_conf.monitor_timeout);

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);
		}

	}

	if (result != L2CAP_CONF_SUCCESS)
		goto done;

	/* FCS is used if either side requested it */
	pi->fcs = pi->remote_conf.fcs | pi->local_conf.fcs ;

	if (pi->rx_state == L2CAP_ERTM_RX_STATE_WAIT_F_FLAG) {
		pi->flush_to = pi->remote_conf.flush_to;
		pi->retrans_timeout = pi->remote_conf.retrans_timeout;

		/* When moving back to BR/EDR (amp_move_id == 0) fall back
		 * to the spec-default monitor timeout */
		if (pi->amp_move_id)
			pi->monitor_timeout = pi->remote_conf.monitor_timeout;
		else
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
		BT_DBG("mode %d monitor timeout %d",
			pi->mode, pi->monitor_timeout);

	}

done:
	rsp->scid = cpu_to_le16(pi->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(0x0000);

	return ptr - data;
}
3689
3690static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
3691{
3692 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003693 struct l2cap_conf_req *req = data;
3694 void *ptr = req->data;
3695 int type, olen;
3696 unsigned long val;
3697 struct l2cap_conf_rfc rfc;
3698
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003699 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003700
3701 while (len >= L2CAP_CONF_OPT_SIZE) {
3702 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3703
3704 switch (type) {
3705 case L2CAP_CONF_MTU:
3706 if (val < L2CAP_DEFAULT_MIN_MTU) {
3707 *result = L2CAP_CONF_UNACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003708 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003709 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003710 pi->imtu = val;
3711 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003712 break;
3713
3714 case L2CAP_CONF_FLUSH_TO:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003715 pi->flush_to = val;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003716 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003717 2, pi->flush_to);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003718 break;
3719
3720 case L2CAP_CONF_RFC:
3721 if (olen == sizeof(rfc))
3722 memcpy(&rfc, (void *)val, olen);
3723
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003724 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
3725 rfc.mode != pi->mode)
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003726 return -ECONNREFUSED;
3727
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003728 pi->fcs = 0;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003729
3730 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
3731 sizeof(rfc), (unsigned long) &rfc);
3732 break;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003733
3734 case L2CAP_CONF_EXT_WINDOW:
3735 pi->tx_win = val;
3736
3737 if (pi->tx_win > L2CAP_TX_WIN_MAX_ENHANCED)
3738 pi->tx_win = L2CAP_TX_WIN_MAX_ENHANCED;
3739
3740 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EXT_WINDOW,
3741 2, pi->tx_win);
3742 break;
3743
3744 default:
3745 break;
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003746 }
3747 }
3748
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003749 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003750 return -ECONNREFUSED;
3751
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003752 pi->mode = rfc.mode;
Gustavo F. Padovan6c2ea7a2010-06-08 20:08:49 -03003753
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003754 if (*result == L2CAP_CONF_SUCCESS) {
3755 switch (rfc.mode) {
3756 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003757 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3758 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3759 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003760 break;
3761 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003762 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003763 }
3764 }
3765
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003766 req->dcid = cpu_to_le16(pi->dcid);
Gustavo F. Padovanf2fcfcd2009-07-04 15:06:24 -03003767 req->flags = cpu_to_le16(0x0000);
3768
3769 return ptr - data;
3770}
3771
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003772static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
Linus Torvalds1da177e2005-04-16 15:20:36 -07003773{
3774 struct l2cap_conf_rsp *rsp = data;
3775 void *ptr = rsp->data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07003776
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003777 BT_DBG("sk %p", sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003778
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003779 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
Marcel Holtmann5dee9e72007-05-24 14:27:19 +02003780 rsp->result = cpu_to_le16(result);
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07003781 rsp->flags = cpu_to_le16(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -07003782
3783 return ptr - data;
3784}
3785
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003786static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
Gustavo F. Padovan710f9b02011-03-25 14:30:37 -03003787{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003788 struct l2cap_pinfo *pi = l2cap_pi(sk);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003789 int type, olen;
3790 unsigned long val;
3791 struct l2cap_conf_rfc rfc;
3792
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003793 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003794
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003795 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003796 return;
3797
3798 while (len >= L2CAP_CONF_OPT_SIZE) {
3799 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3800
3801 switch (type) {
3802 case L2CAP_CONF_RFC:
3803 if (olen == sizeof(rfc))
3804 memcpy(&rfc, (void *)val, olen);
3805 goto done;
3806 }
3807 }
3808
3809done:
3810 switch (rfc.mode) {
3811 case L2CAP_MODE_ERTM:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003812 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
3813 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
3814 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003815 break;
3816 case L2CAP_MODE_STREAMING:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003817 pi->mps = le16_to_cpu(rfc.max_pdu_size);
Gustavo F. Padovan7b1c0042010-05-01 16:15:39 -03003818 }
3819}
3820
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07003821static void l2cap_conf_ext_fs_get(struct sock *sk, void *rsp, int len)
3822{
3823 struct l2cap_pinfo *pi = l2cap_pi(sk);
3824 int type, olen;
3825 unsigned long val;
3826 struct l2cap_conf_ext_fs fs;
3827
3828 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
3829
3830 while (len >= L2CAP_CONF_OPT_SIZE) {
3831 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
3832 if ((type == L2CAP_CONF_EXT_FS) &&
3833 (olen == sizeof(struct l2cap_conf_ext_fs))) {
3834 memcpy(&fs, (void *)val, olen);
3835 pi->local_fs.id = fs.id;
3836 pi->local_fs.type = fs.type;
3837 pi->local_fs.max_sdu = le16_to_cpu(fs.max_sdu);
3838 pi->local_fs.sdu_arr_time =
3839 le32_to_cpu(fs.sdu_arr_time);
3840 pi->local_fs.acc_latency = le32_to_cpu(fs.acc_latency);
3841 pi->local_fs.flush_to = le32_to_cpu(fs.flush_to);
3842 break;
3843 }
3844 }
3845
3846}
3847
3848static int l2cap_finish_amp_move(struct sock *sk)
3849{
3850 struct l2cap_pinfo *pi;
3851 int err;
3852
3853 BT_DBG("sk %p", sk);
3854
3855 pi = l2cap_pi(sk);
3856
3857 pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
3858 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
3859
3860 if (pi->ampcon)
3861 pi->conn->mtu = pi->ampcon->hdev->acl_mtu;
3862 else
3863 pi->conn->mtu = pi->conn->hcon->hdev->acl_mtu;
3864
3865 err = l2cap_setup_resegment(sk);
3866
3867 return err;
3868}
3869
/* Handle the Configure Response that concludes a channel-move
 * reconfiguration.  On success, verify that any RFC option in the
 * response keeps an acceptable mode.  In all cases the ERTM timers are
 * stopped, then the move is completed according to which side initiated
 * the reconfiguration (acceptor answers the poll, initiator sends its
 * own poll and waits for the F-flag).
 *
 * Returns 0 on success or a negative error (-ECONNREFUSED when no
 * reconfiguration was pending or the peer's mode is unacceptable).
 */
static int l2cap_amp_move_reconf_rsp(struct sock *sk, void *rsp, int len,
					u16 result)
{
	int err = 0;
	struct l2cap_conf_rfc rfc = {.mode = L2CAP_MODE_BASIC};
	struct l2cap_pinfo *pi = l2cap_pi(sk);

	BT_DBG("sk %p, rsp %p, len %d, res 0x%2.2x", sk, rsp, len, result);

	/* A response is only meaningful while a reconfiguration is pending */
	if (pi->reconf_state == L2CAP_RECONF_NONE)
		return -ECONNREFUSED;

	if (result == L2CAP_CONF_SUCCESS) {
		while (len >= L2CAP_CONF_OPT_SIZE) {
			int type, olen;
			unsigned long val;

			len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

			if (type == L2CAP_CONF_RFC) {
				if (olen == sizeof(rfc))
					memcpy(&rfc, (void *)val, olen);
				/* Only the current mode or ERTM is allowed
				 * after a move */
				if (rfc.mode != pi->mode &&
					rfc.mode != L2CAP_MODE_ERTM) {
					err = -ECONNREFUSED;
					goto done;
				}
				break;
			}
		}
	}

done:
	/* Quiesce ERTM timers before switching over */
	l2cap_ertm_stop_ack_timer(pi);
	l2cap_ertm_stop_retrans_timer(pi);
	l2cap_ertm_stop_monitor_timer(pi);

	if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_ACC) {
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_NONE;

		/* Respond to poll */
		err = l2cap_answer_move_poll(sk);

	} else if (l2cap_pi(sk)->reconf_state == L2CAP_RECONF_INT) {

		/* If moving to BR/EDR, use default timeout defined by
		 * the spec */
		if (pi->amp_move_id == 0)
			pi->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;

		if (pi->mode == L2CAP_MODE_ERTM) {
			/* Poll the peer and wait for the final F-flag */
			l2cap_ertm_tx(sk, NULL, NULL,
				L2CAP_ERTM_EVENT_EXPLICIT_POLL);
			pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
		}
	}

	return err;
}
3929
3930
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003931static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3932{
3933 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
3934
3935 if (rej->reason != 0x0000)
3936 return 0;
3937
3938 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
3939 cmd->ident == conn->info_ident) {
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003940 del_timer(&conn->info_timer);
Marcel Holtmann984947d2009-02-06 23:35:19 +01003941
3942 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
Marcel Holtmanne1027a72009-02-09 09:18:02 +01003943 conn->info_ident = 0;
Marcel Holtmann984947d2009-02-06 23:35:19 +01003944
Marcel Holtmann4e8402a2007-10-20 13:37:56 +02003945 l2cap_conn_start(conn);
3946 }
3947
3948 return 0;
3949}
3950
/* Handle an incoming Connection Request (BR/EDR or AMP): locate the
 * listening socket for the requested PSM, run security and backlog
 * checks, create and register the child channel, and send the
 * connection response with code @rsp_code.  @amp_id selects the AMP
 * controller (0 means BR/EDR; AMP connections are forced to a PEND
 * result until the physical link is up).
 *
 * Returns the new child socket, or NULL when the request was refused.
 */
static struct sock *l2cap_create_connect(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd,
					u8 *data, u8 rsp_code,
					u8 amp_id)
{
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *parent, *sk = NULL;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
	if (!parent) {
		result = L2CAP_CR_BAD_PSM;
		goto sendresp;
	}

	bh_lock_sock(parent);

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		/* 0x05: authentication failure disconnect reason */
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;
		goto response;
	}

	/* Default result for the remaining failure paths */
	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);
		goto response;
	}

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
	if (!sk)
		goto response;

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		/* Dispose of the just-allocated socket */
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);
		sk = NULL;
		goto response;
	}

	/* Keep the ACL alive while the channel exists */
	hci_conn_hold(conn->hcon);

	/* Inherit settings from the listening socket, then bind the
	 * child to this connection's addresses and the peer's CID */
	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	bt_accept_enqueue(parent, sk);

	__l2cap_chan_add(conn, sk);
	dcid = l2cap_pi(sk)->scid;
	l2cap_pi(sk)->amp_id = amp_id;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				/* Let userspace authorize before accepting */
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);
			} else {
				/* Force pending result for AMP controllers.
				 * The connection will succeed after the
				 * physical link is up. */
				if (amp_id) {
					sk->sk_state = BT_CONNECT2;
					result = L2CAP_CR_PEND;
				} else {
					sk->sk_state = BT_CONFIG;
					result = L2CAP_CR_SUCCESS;
				}
				status = L2CAP_CS_NO_INFO;
			}
		} else {
			/* Security procedures still in progress */
			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;
		}
	} else {
		/* Feature mask exchange not finished yet */
		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;
	}

	write_unlock_bh(&list->lock);

response:
	bh_unlock_sock(parent);

sendresp:
	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, rsp_code, sizeof(rsp), &rsp);

	/* Kick off the feature-mask Information Request if not done yet */
	if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
	}

	/* On immediate success start configuration right away */
	if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
			result == L2CAP_CR_SUCCESS) {
		u8 buf[128];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
				l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

	return sk;
}
4093
/* Handle an incoming L2CAP Connection Request over BR/EDR.  Thin
 * wrapper: channel creation and the response are delegated to
 * l2cap_create_connect() with the standard Connect Response code and
 * no AMP controller (amp_id 0).  Always returns 0.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	l2cap_create_connect(conn, cmd, data, L2CAP_CONN_RSP, 0);
	return 0;
}
4100
/* Handle a Connection Response from the peer.  Look up the channel by
 * source CID (or, before one is assigned, by the command identifier),
 * then act on the result code: SUCCESS starts configuration, PEND marks
 * the connection as pending, anything else tears the channel down.
 *
 * NOTE(review): the lookup helpers appear to return with the socket
 * locked — bh_unlock_sock() below is the matching release; confirm
 * against their definitions.
 *
 * Returns 0, or -EFAULT when no matching channel is found.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;
	struct sock *sk;
	u8 req[128];

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	if (scid) {
		sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
		if (!sk)
			return -EFAULT;
	} else {
		/* No source CID yet: match on the request identifier */
		sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		if (!sk)
			return -EFAULT;
	}

	switch (result) {
	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Avoid sending a duplicate Configure Request */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
			break;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;
		break;

	case L2CAP_CR_PEND:
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
		break;

	default:
		/* don't delete l2cap channel if sk is owned by user */
		if (sock_owned_by_user(sk)) {
			sk->sk_state = BT_DISCONN;
			l2cap_sock_clear_timer(sk);
			/* Defer teardown briefly until the user releases it */
			l2cap_sock_set_timer(sk, HZ / 5);
			break;
		}

		l2cap_chan_del(sk, ECONNREFUSED);
		break;
	}

	bh_unlock_sock(sk);
	return 0;
}
4162
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004163static inline void set_default_fcs(struct l2cap_pinfo *pi)
Mat Martineau8c462b62010-08-24 15:35:42 -07004164{
4165 /* FCS is enabled only in ERTM or streaming mode, if one or both
4166 * sides request it.
4167 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004168 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
4169 pi->fcs = L2CAP_FCS_NONE;
4170 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
4171 pi->fcs = L2CAP_FCS_CRC16;
Mat Martineau8c462b62010-08-24 15:35:42 -07004172}
4173
/* Handle an incoming L2CAP Configure Request.
 *
 * Accumulates (possibly fragmented) configuration options into the
 * channel's conf_req buffer, then parses the complete request and sends
 * a Configure Response.  Also detects the reconfiguration that follows
 * an AMP channel move, and the vendor "lockstep" configuration flow.
 *
 * Returns 0 on normal handling, -ENOENT if no channel matches dcid.
 * NOTE(review): l2cap_get_chan_by_scid appears to return with the
 * socket bh-locked — every exit path goes through bh_unlock_sock();
 * confirm against its definition.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
{
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
	u16 dcid, flags;
	u8 rspbuf[64];
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *) rspbuf;
	struct sock *sk;
	int len;
	u8 amp_move_reconf = 0;

	dcid = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	/* The peer's dcid is our scid; look the channel up by it. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return -ENOENT;

	BT_DBG("sk_state 0x%2.2x rx_state 0x%2.2x "
		"reconf_state 0x%2.2x amp_id 0x%2.2x amp_move_id 0x%2.2x",
		sk->sk_state, l2cap_pi(sk)->rx_state,
		l2cap_pi(sk)->reconf_state, l2cap_pi(sk)->amp_id,
		l2cap_pi(sk)->amp_move_id);

	/* Detect a reconfig request due to channel move between
	 * BR/EDR and AMP
	 */
	if (sk->sk_state == BT_CONNECTED &&
		l2cap_pi(sk)->rx_state ==
			L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE)
		l2cap_pi(sk)->reconf_state = L2CAP_RECONF_ACC;

	if (l2cap_pi(sk)->reconf_state != L2CAP_RECONF_NONE)
		amp_move_reconf = 1;

	/* Config is only legal in BT_CONFIG, or on a connected channel
	 * that is mid move-reconfiguration.  Reason 0x0002 is the spec's
	 * "invalid CID in request" command-reject code.
	 */
	if (sk->sk_state != BT_CONFIG && !amp_move_reconf) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
				sizeof(rej), &rej);
		goto unlock;
	}

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_REJECT, flags), rspbuf);
		goto unlock;
	}

	/* Store config. */
	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	/* Continuation flag (C-bit) set: more option fragments follow. */
	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rspbuf,
					L2CAP_CONF_SUCCESS, 0x0001), rspbuf);
		goto unlock;
	}

	/* Complete config. */
	if (!amp_move_reconf)
		len = l2cap_parse_conf_req(sk, rspbuf);
	else
		len = l2cap_parse_amp_move_reconf_req(sk, rspbuf);

	/* Negative length means the options were unparseable; tear the
	 * channel down.
	 */
	if (len < 0) {
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto unlock;
	}

	l2cap_pi(sk)->conf_ident = cmd->ident;
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_LOCKSTEP &&
			rsp->result == cpu_to_le16(L2CAP_CONF_PENDING) &&
			!l2cap_pi(sk)->amp_id) {
		/* Send success response right after pending if using
		 * lockstep config on BR/EDR
		 */
		rsp->result = cpu_to_le16(L2CAP_CONF_SUCCESS);
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_OUTPUT_DONE;
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rspbuf);
	}

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	/* A move-reconfig does not advance the normal config handshake. */
	if (amp_move_reconf)
		goto unlock;

	l2cap_pi(sk)->num_conf_rsp++;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
		goto unlock;

	/* Both directions configured: the channel is now open. */
	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		set_default_fcs(l2cap_pi(sk));

		sk->sk_state = BT_CONNECTED;

		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
			l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
		goto unlock;
	}

	/* We have answered the peer but not yet sent our own request. */
	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
		u8 buf[64];
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
	}

unlock:
	bh_unlock_sock(sk);
	return 0;
}
4301
/* Handle an incoming L2CAP Configure Response.
 *
 * Drives the outbound half of the configuration handshake, including
 * the vendor "lockstep" flow (SUCCESS must be preceded by PENDING) and
 * AMP move reconfiguration.  On UNACCEPT the rejected options are
 * re-negotiated up to L2CAP_CONF_MAX_CONF_RSP times; anything else
 * disconnects the channel.
 *
 * NOTE(review): every exit goes through bh_unlock_sock(), so
 * l2cap_get_chan_by_scid presumably returns with the socket bh-locked —
 * confirm against its definition.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
	u16 scid, flags, result;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	int len = cmd->len - sizeof(*rsp);	/* length of option payload */

	scid = __le16_to_cpu(rsp->scid);
	flags = __le16_to_cpu(rsp->flags);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
			scid, flags, result);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
	if (!sk)
		return 0;

	pi = l2cap_pi(sk);

	/* Mid-move reconfiguration takes a separate path entirely. */
	if (pi->reconf_state != L2CAP_RECONF_NONE) {
		l2cap_amp_move_reconf_rsp(sk, rsp->data, len, result);
		goto done;
	}

	switch (result) {
	case L2CAP_CONF_SUCCESS:
		if (pi->conf_state & L2CAP_CONF_LOCKSTEP &&
				!(pi->conf_state & L2CAP_CONF_LOCKSTEP_PEND)) {
			/* Lockstep procedure requires a pending response
			 * before success.
			 */
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);
		break;

	case L2CAP_CONF_PENDING:
		/* PENDING is only valid inside the lockstep procedure. */
		if (!(pi->conf_state & L2CAP_CONF_LOCKSTEP)) {
			l2cap_send_disconn_req(conn, sk, ECONNRESET);
			goto done;
		}

		l2cap_conf_rfc_get(sk, rsp->data, len);

		pi->conf_state |= L2CAP_CONF_LOCKSTEP_PEND;

		l2cap_conf_ext_fs_get(sk, rsp->data, len);

		if (pi->amp_id && pi->conf_state & L2CAP_CONF_PEND_SENT) {
			struct hci_chan *chan;

			/* Already sent a 'pending' response, so set up
			 * the logical link now
			 */
			chan = l2cap_chan_admit(pi->amp_id, pi);
			if (!chan) {
				l2cap_send_disconn_req(pi->conn, sk,
							ECONNRESET);
				goto done;
			}

			chan->l2cap_sk = sk;
			if (chan->state == BT_CONNECTED)
				l2cap_create_cfm(chan, 0);
		}

		goto done;

	case L2CAP_CONF_UNACCEPT:
		if (pi->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
			char req[64];

			if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			/* throw out any old stored conf requests */
			result = L2CAP_CONF_SUCCESS;
			len = l2cap_parse_conf_rsp(sk, rsp->data,
							len, req, &result);
			if (len < 0) {
				l2cap_send_disconn_req(conn, sk, ECONNRESET);
				goto done;
			}

			l2cap_send_cmd(conn, l2cap_get_ident(conn),
						L2CAP_CONF_REQ, len, req);
			pi->num_conf_req++;
			if (result != L2CAP_CONF_SUCCESS)
				goto done;
			break;
		}
		/* fall through: retry budget exhausted, treat as failure */

	default:
		sk->sk_err = ECONNRESET;
		/* Give the disconnect handshake time before forcing down. */
		l2cap_sock_set_timer(sk, HZ * 5);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);
		goto done;
	}

	/* Continuation flag set: more response fragments follow. */
	if (flags & 0x01)
		goto done;

	pi->conf_state |= L2CAP_CONF_INPUT_DONE;

	if (pi->conf_state & L2CAP_CONF_OUTPUT_DONE) {
		set_default_fcs(pi);

		sk->sk_state = BT_CONNECTED;

		if (pi->mode == L2CAP_MODE_ERTM ||
			pi->mode == L2CAP_MODE_STREAMING)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);
	}

done:
	bh_unlock_sock(sk);
	return 0;
}
4428
/* Handle an incoming L2CAP Disconnect Request.
 *
 * Acknowledges the request, purges pending transmit state (unless a
 * disconnect was already in flight), and deletes the channel — unless
 * the socket is currently owned by a user-context caller, in which case
 * teardown is deferred via a short timer.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
	struct l2cap_disconn_rsp rsp;
	u16 dcid, scid;
	struct sock *sk;

	scid = __le16_to_cpu(req->scid);
	dcid = __le16_to_cpu(req->dcid);

	BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);

	/* The peer's dcid is our local channel id. */
	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
	if (!sk)
		return 0;

	/* Echo back our cid pair (swapped perspective) in the response. */
	rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
	rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);

	/* Only do cleanup if a disconnect request was not sent already */
	if (sk->sk_state != BT_DISCONN) {
		sk->sk_shutdown = SHUTDOWN_MASK;

		skb_queue_purge(TX_QUEUE(sk));

		/* ERTM keeps extra retransmission state and timers. */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
			skb_queue_purge(SREJ_QUEUE(sk));

			__cancel_delayed_work(&l2cap_pi(sk)->ack_work);
			__cancel_delayed_work(&l2cap_pi(sk)->retrans_work);
			__cancel_delayed_work(&l2cap_pi(sk)->monitor_work);
		}
	}

	/* don't delete l2cap channel if sk is owned by user */
	if (sock_owned_by_user(sk)) {
		sk->sk_state = BT_DISCONN;
		l2cap_sock_clear_timer(sk);
		/* Retry teardown shortly once the user releases the sock. */
		l2cap_sock_set_timer(sk, HZ / 5);
		bh_unlock_sock(sk);
		return 0;
	}

	l2cap_chan_del(sk, ECONNRESET);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	return 0;
}
4480
4481static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4482{
4483 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
4484 u16 dcid, scid;
4485 struct sock *sk;
4486
4487 scid = __le16_to_cpu(rsp->scid);
4488 dcid = __le16_to_cpu(rsp->dcid);
4489
4490 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
4491
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004492 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
4493 if (!sk)
Linus Torvalds1da177e2005-04-16 15:20:36 -07004494 return 0;
4495
Andrei Emeltchenkoa49184c2010-11-03 12:32:44 +02004496 /* don't delete l2cap channel if sk is owned by user */
4497 if (sock_owned_by_user(sk)) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004498 sk->sk_state = BT_DISCONN;
4499 l2cap_sock_clear_timer(sk);
4500 l2cap_sock_set_timer(sk, HZ / 5);
Andrei Emeltchenkoa49184c2010-11-03 12:32:44 +02004501 bh_unlock_sock(sk);
4502 return 0;
4503 }
4504
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004505 l2cap_chan_del(sk, 0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004506 bh_unlock_sock(sk);
4507
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004508 l2cap_sock_kill(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07004509 return 0;
4510}
4511
4512static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
4513{
4514 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
Linus Torvalds1da177e2005-04-16 15:20:36 -07004515 u16 type;
4516
4517 type = __le16_to_cpu(req->type);
4518
4519 BT_DBG("type 0x%4.4x", type);
4520
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004521 if (type == L2CAP_IT_FEAT_MASK) {
4522 u8 buf[8];
Marcel Holtmann44dd46d2009-05-02 19:09:01 -07004523 u32 feat_mask = l2cap_feat_mask;
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004524 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4525 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
4526 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
Gustavo F. Padovand1c4a172010-07-18 16:25:54 -03004527 if (!disable_ertm)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03004528 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004529 | L2CAP_FEAT_FCS | L2CAP_FEAT_EXT_WINDOW;
Gustavo F. Padovan1b7bf4e2009-08-24 00:45:20 -03004530 put_unaligned_le32(feat_mask, rsp->data);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004531 l2cap_send_cmd(conn, cmd->ident,
4532 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmanne1027a72009-02-09 09:18:02 +01004533 } else if (type == L2CAP_IT_FIXED_CHAN) {
4534 u8 buf[12];
4535 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
4536 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
4537 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
4538 memcpy(buf + 4, l2cap_fixed_chan, 8);
4539 l2cap_send_cmd(conn, cmd->ident,
4540 L2CAP_INFO_RSP, sizeof(buf), buf);
Marcel Holtmannf0709e02007-10-20 13:38:51 +02004541 } else {
4542 struct l2cap_info_rsp rsp;
4543 rsp.type = cpu_to_le16(type);
4544 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
4545 l2cap_send_cmd(conn, cmd->ident,
4546 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
4547 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07004548
4549 return 0;
4550}
4551
/* Handle an incoming L2CAP Information Response.
 *
 * Completes the connection-level information exchange: records the
 * peer's feature mask, optionally follows up with a fixed-channel
 * query, and kicks off pending channel connects once the exchange is
 * done.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
	u16 type, result;

	type = __le16_to_cpu(rsp->type);
	result = __le16_to_cpu(rsp->result);

	BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);

	/* L2CAP Info req/rsp are unbound to channels, add extra checks */
	if (cmd->ident != conn->info_ident ||
			conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
		return 0;

	del_timer(&conn->info_timer);

	/* Peer refused the query: mark the exchange finished anyway so
	 * pending connects are not blocked forever.
	 */
	if (result != L2CAP_IR_SUCCESS) {
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);

		return 0;
	}

	if (type == L2CAP_IT_FEAT_MASK) {
		conn->feat_mask = get_unaligned_le32(rsp->data);

		/* If the peer supports fixed channels, query them next;
		 * otherwise the exchange is complete.
		 */
		if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
			struct l2cap_info_req req;
			req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);

			conn->info_ident = l2cap_get_ident(conn);

			l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(req), &req);
		} else {
			conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
			conn->info_ident = 0;

			l2cap_conn_start(conn);
		}
	} else if (type == L2CAP_IT_FIXED_CHAN) {
		/* First octet of the bitmap carries the fixed channels
		 * relevant here.
		 */
		conn->fc_mask = rsp->data[0];
		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
	}

	return 0;
}
4605
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07004606static void l2cap_send_move_chan_req(struct l2cap_conn *conn,
4607 struct l2cap_pinfo *pi, u16 icid, u8 dest_amp_id)
4608{
4609 struct l2cap_move_chan_req req;
4610 u8 ident;
4611
4612 BT_DBG("pi %p, icid %d, dest_amp_id %d", pi, (int) icid,
4613 (int) dest_amp_id);
4614
4615 ident = l2cap_get_ident(conn);
4616 if (pi)
4617 pi->ident = ident;
4618
4619 req.icid = cpu_to_le16(icid);
4620 req.dest_amp_id = dest_amp_id;
4621
4622 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_REQ, sizeof(req), &req);
4623}
4624
4625static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
4626 u16 icid, u16 result)
4627{
4628 struct l2cap_move_chan_rsp rsp;
4629
4630 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4631
4632 rsp.icid = cpu_to_le16(icid);
4633 rsp.result = cpu_to_le16(result);
4634
4635 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
4636}
4637
4638static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
4639 struct l2cap_pinfo *pi, u16 icid, u16 result)
4640{
4641 struct l2cap_move_chan_cfm cfm;
4642 u8 ident;
4643
4644 BT_DBG("icid %d, result %d", (int) icid, (int) result);
4645
4646 ident = l2cap_get_ident(conn);
4647 if (pi)
4648 pi->ident = ident;
4649
4650 cfm.icid = cpu_to_le16(icid);
4651 cfm.result = cpu_to_le16(result);
4652
4653 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
4654}
4655
4656static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
4657 u16 icid)
4658{
4659 struct l2cap_move_chan_cfm_rsp rsp;
4660
4661 BT_DBG("icid %d", (int) icid);
4662
4663 rsp.icid = cpu_to_le16(icid);
4664 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
4665}
4666
/* Handle an incoming Create Channel Request (AMP channel creation).
 *
 * Validates the requested AMP controller (it must exist and be up),
 * refusing with REFUSED_CONTROLLER otherwise, then funnels into the
 * common connect path.  A successfully created channel is flagged for
 * lockstep configuration, and the physical-link setup is started if the
 * connection-level information exchange already finished.
 */
static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_create_chan_req *req =
		(struct l2cap_create_chan_req *) data;
	struct sock *sk;
	u16 psm, scid;

	psm = le16_to_cpu(req->psm);
	scid = le16_to_cpu(req->scid);

	BT_DBG("psm %d, scid %d, amp_id %d", (int) psm, (int) scid,
		(int) req->amp_id);

	/* amp_id 0 means BR/EDR, which needs no controller validation. */
	if (req->amp_id) {
		struct hci_dev *hdev;

		/* Validate AMP controller id */
		hdev = hci_dev_get(A2MP_HCI_ID(req->amp_id));
		if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
			struct l2cap_create_chan_rsp rsp;

			rsp.dcid = 0;
			rsp.scid = cpu_to_le16(scid);
			rsp.result = L2CAP_CREATE_CHAN_REFUSED_CONTROLLER;
			rsp.status = L2CAP_CREATE_CHAN_STATUS_NONE;

			l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
					sizeof(rsp), &rsp);

			/* hdev may be valid-but-down; drop the reference. */
			if (hdev)
				hci_dev_put(hdev);

			return 0;
		}

		hci_dev_put(hdev);
	}

	sk = l2cap_create_connect(conn, cmd, data, L2CAP_CREATE_CHAN_RSP,
					req->amp_id);

	if (sk)
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_LOCKSTEP;

	if (sk && req->amp_id &&
			(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
		amp_accept_physical(conn, req->amp_id, sk);

	return 0;
}
4718
/* Handle an incoming Create Channel Response.
 *
 * A Create Channel Response carries the same payload layout as a
 * Connect Response, so it is processed by the common handler.
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
					struct l2cap_cmd_hdr *cmd, u8 *data)
{
	BT_DBG("conn %p", conn);

	return l2cap_connect_rsp(conn, cmd, data);
}
4726
/* Handle an incoming Move Channel Request (AMP channel move, responder
 * side).
 *
 * Validates that the channel is movable (dynamic CID, ERTM/streaming
 * mode, destination differs from current controller, destination
 * controller up, no disallowing policy, and no move collision lost to
 * the peer), then either completes the move toward BR/EDR or starts
 * accepting the physical link for a move to an AMP.  Always answers
 * with a Move Channel Response carrying the outcome.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_req *req = (struct l2cap_move_chan_req *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid = 0;
	u16 result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;

	icid = le16_to_cpu(req->icid);

	BT_DBG("icid %d, dest_amp_id %d", (int) icid, (int) req->dest_amp_id);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	/* No such channel: respond with icid 0 / NOT_ALLOWED. */
	if (!sk)
		goto send_move_response;

	lock_sock(sk);
	pi = l2cap_pi(sk);

	/* Only dynamic channels in ERTM or streaming mode may move. */
	if (pi->scid < L2CAP_CID_DYN_START ||
		(pi->mode != L2CAP_MODE_ERTM &&
		 pi->mode != L2CAP_MODE_STREAMING)) {
		goto send_move_response;
	}

	if (pi->amp_id == req->dest_amp_id) {
		result = L2CAP_MOVE_CHAN_REFUSED_SAME_ID;
		goto send_move_response;
	}

	/* Non-zero destination: the AMP controller must exist and be up. */
	if (req->dest_amp_id) {
		struct hci_dev *hdev;
		hdev = hci_dev_get(A2MP_HCI_ID(req->dest_amp_id));
		if (!hdev || !test_bit(HCI_UP, &hdev->flags)) {
			if (hdev)
				hci_dev_put(hdev);

			result = L2CAP_MOVE_CHAN_REFUSED_CONTROLLER;
			goto send_move_response;
		}
	}

	/* Move collision: if a move is already in progress and our
	 * address wins the comparison, refuse so the peer backs off.
	 */
	if (((pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
		pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE) ||
		pi->amp_move_role != L2CAP_AMP_MOVE_NONE) &&
		bacmp(conn->src, conn->dst) > 0) {
		result = L2CAP_MOVE_CHAN_REFUSED_COLLISION;
		goto send_move_response;
	}

	/* Local policy pins this channel to BR/EDR. */
	if (pi->amp_pref == BT_AMP_POLICY_REQUIRE_BR_EDR) {
		result = L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED;
		goto send_move_response;
	}

	pi->amp_move_cmd_ident = cmd->ident;
	pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
	l2cap_amp_move_setup(sk);
	pi->amp_move_id = req->dest_amp_id;
	icid = pi->dcid;

	if (req->dest_amp_id == 0) {
		/* Moving to BR/EDR */
		if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			result = L2CAP_MOVE_CHAN_PENDING;
		} else {
			pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
			result = L2CAP_MOVE_CHAN_SUCCESS;
		}
	} else {
		/* Moving to an AMP: physical link setup happens first. */
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_PREPARE;
		amp_accept_physical(pi->conn, req->dest_amp_id, sk);
		result = L2CAP_MOVE_CHAN_PENDING;
	}

send_move_response:
	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);

	if (sk)
		release_sock(sk);

	return 0;
}
4815
/* Handle an incoming Move Channel Response (AMP channel move, initiator
 * side).
 *
 * On SUCCESS/PENDING, advances the local move state machine: waits for
 * logical-link completion, confirms the move, or admits the logical
 * link on the destination AMP as appropriate.  On any failure result,
 * either converts to responder (collision) or reverts the move, and in
 * all failure paths sends an UNCONFIRMED Move Channel Confirm.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_rsp *rsp = (struct l2cap_move_chan_rsp *) data;
	struct sock *sk;
	struct l2cap_pinfo *pi;
	u16 icid, result;

	icid = le16_to_cpu(rsp->icid);
	result = le16_to_cpu(rsp->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	switch (result) {
	case L2CAP_MOVE_CHAN_SUCCESS:
	case L2CAP_MOVE_CHAN_PENDING:
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);
		/* PENDING extends the wait with the ERTX timeout. */
		if (result == L2CAP_MOVE_CHAN_PENDING)
			l2cap_sock_set_timer(sk, L2CAP_MOVE_ERTX_TIMEOUT);

		if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent when logical link
			 * is complete.
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
		} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
				pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS) {
			/* Logical link is up or moving to BR/EDR,
			 * proceed with move */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			} else {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			}
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_MOVE_RSP) {
			/* Default flow spec: "best effort" everything. */
			struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
					0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
			struct hci_chan *chan;
			/* Moving to AMP */
			if (result == L2CAP_MOVE_CHAN_SUCCESS) {
				/* Remote is ready, send confirm immediately
				 * after logical link is ready
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			} else {
				/* Both logical link and move success
				 * are required to confirm
				 */
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE;
			}
			pi->remote_fs = default_fs;
			pi->local_fs = default_fs;
			chan = l2cap_chan_admit(pi->amp_move_id, pi);
			if (!chan) {
				/* Logical link not available */
				l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
				break;
			}
			if (chan->state == BT_CONNECTED) {
				/* Logical link is already ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				if (result == L2CAP_MOVE_CHAN_SUCCESS) {
					/* Can confirm now */
					l2cap_send_move_chan_cfm(conn, pi,
						pi->scid,
						L2CAP_MOVE_CHAN_CONFIRMED);
				} else {
					/* Now only need move success
					 * required to confirm
					 */
					pi->amp_move_state =
						L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
				}
			} else
				chan->l2cap_sk = sk;
		} else {
			/* Any other amp move state means the move failed. */
			l2cap_send_move_chan_cfm(conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}
		break;
	default:
		/* Failed (including collision case) */
		/* Lookup by command ident: the peer may have answered with
		 * an unusable icid on failure.
		 */
		read_lock(&conn->chan_list.lock);
		sk = __l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
		read_unlock(&conn->chan_list.lock);

		if (!sk) {
			/* Could not locate channel, icid is best guess */
			l2cap_send_move_chan_cfm(conn, NULL, icid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			break;
		}

		lock_sock(sk);
		pi = l2cap_pi(sk);

		l2cap_sock_clear_timer(sk);

		if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if (result == L2CAP_MOVE_CHAN_REFUSED_COLLISION)
				pi->amp_move_role = L2CAP_AMP_MOVE_RESPONDER;
			else {
				/* Cleanup - cancel move */
				pi->amp_move_id = pi->amp_id;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
				l2cap_amp_move_revert(sk);
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
			}
		}

		l2cap_send_move_chan_cfm(conn, pi, pi->scid,
					L2CAP_MOVE_CHAN_UNCONFIRMED);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		break;
	}

	if (sk)
		release_sock(sk);

	return 0;
}
4966
/* Handle an incoming Move Channel Confirm (responder side).
 *
 * If we were waiting for the confirm, commit the move (adopting the new
 * controller id and releasing the AMP channel when moving back to
 * BR/EDR) or revert it on an unconfirmed result.  A confirm-response is
 * always sent back, even for unknown channels.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm *cfm = (struct l2cap_move_chan_cfm *) data;
	struct sock *sk;
	u16 icid, result;

	icid = le16_to_cpu(cfm->icid);
	result = le16_to_cpu(cfm->result);

	BT_DBG("icid %d, result %d", (int) icid, (int) result);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_dcid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk) {
		BT_DBG("Bad channel (%d)", (int) icid);
		goto send_move_confirm_response;
	}

	lock_sock(sk);

	if (l2cap_pi(sk)->amp_move_state == L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		if (result == L2CAP_MOVE_CHAN_CONFIRMED) {
			l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;
			if ((!l2cap_pi(sk)->amp_id) &&
					(l2cap_pi(sk)->ampchan)) {
				/* Have moved off of AMP, free the channel */
				/* NOTE(review): refcnt is read after
				 * hci_chan_put(); this assumes put does not
				 * free the hci_chan — confirm.
				 */
				hci_chan_put(l2cap_pi(sk)->ampchan);
				if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
				l2cap_pi(sk)->ampchan = NULL;
				l2cap_pi(sk)->ampcon = NULL;
			}
			l2cap_amp_move_success(sk);
		} else {
			/* Peer did not confirm: fall back to the current
			 * controller.
			 */
			l2cap_pi(sk)->amp_move_id = l2cap_pi(sk)->amp_id;
			l2cap_amp_move_revert(sk);
		}
		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	} else if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
		BT_DBG("Bad AMP_MOVE_STATE (%d)", l2cap_pi(sk)->amp_move_state);
	}

send_move_confirm_response:
	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);

	if (sk)
		release_sock(sk);

	return 0;
}
5023
/* Handle an incoming Move Channel Confirm Response (initiator side).
 *
 * Finalizes a move we confirmed: adopts the destination controller id,
 * releases the AMP channel if the move landed back on BR/EDR, and
 * resumes data flow via l2cap_amp_move_success().
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
			struct l2cap_cmd_hdr *cmd, u8 *data)
{
	struct l2cap_move_chan_cfm_rsp *rsp =
		(struct l2cap_move_chan_cfm_rsp *) data;
	struct sock *sk;
	u16 icid;

	icid = le16_to_cpu(rsp->icid);

	BT_DBG("icid %d", (int) icid);

	read_lock(&conn->chan_list.lock);
	sk = __l2cap_get_chan_by_scid(&conn->chan_list, icid);
	read_unlock(&conn->chan_list.lock);

	if (!sk)
		return 0;

	lock_sock(sk);

	l2cap_sock_clear_timer(sk);

	if (l2cap_pi(sk)->amp_move_state ==
			L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP) {
		l2cap_pi(sk)->amp_move_state = L2CAP_AMP_STATE_STABLE;
		l2cap_pi(sk)->amp_id = l2cap_pi(sk)->amp_move_id;

		if (!l2cap_pi(sk)->amp_id) {
			/* Have moved off of AMP, free the channel */
			l2cap_pi(sk)->ampcon = NULL;
			if (l2cap_pi(sk)->ampchan) {
				/* NOTE(review): refcnt is read after
				 * hci_chan_put(); this assumes put does not
				 * free the hci_chan — confirm.
				 */
				hci_chan_put(l2cap_pi(sk)->ampchan);
				if (atomic_read(&l2cap_pi(sk)->ampchan->refcnt))
					l2cap_deaggregate(l2cap_pi(sk)->ampchan,
							l2cap_pi(sk));
			}
			l2cap_pi(sk)->ampchan = NULL;
		}

		l2cap_amp_move_success(sk);

		l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
	}

	release_sock(sk);

	return 0;
}
5073
5074static void l2cap_amp_signal_worker(struct work_struct *work)
5075{
5076 int err = 0;
5077 struct l2cap_amp_signal_work *ampwork =
5078 container_of(work, struct l2cap_amp_signal_work, work);
5079
5080 switch (ampwork->cmd.code) {
5081 case L2CAP_MOVE_CHAN_REQ:
5082 err = l2cap_move_channel_req(ampwork->conn, &ampwork->cmd,
5083 ampwork->data);
5084 break;
5085
5086 case L2CAP_MOVE_CHAN_RSP:
5087 err = l2cap_move_channel_rsp(ampwork->conn, &ampwork->cmd,
5088 ampwork->data);
5089 break;
5090
5091 case L2CAP_MOVE_CHAN_CFM:
5092 err = l2cap_move_channel_confirm(ampwork->conn, &ampwork->cmd,
5093 ampwork->data);
5094 break;
5095
5096 case L2CAP_MOVE_CHAN_CFM_RSP:
5097 err = l2cap_move_channel_confirm_rsp(ampwork->conn,
5098 &ampwork->cmd, ampwork->data);
5099 break;
5100
5101 default:
5102 BT_ERR("Unknown signaling command 0x%2.2x", ampwork->cmd.code);
5103 err = -EINVAL;
5104 break;
5105 }
5106
5107 if (err) {
5108 struct l2cap_cmd_rej rej;
5109 BT_DBG("error %d", err);
5110
5111 /* In this context, commands are only rejected with
5112 * "command not understood", code 0.
5113 */
5114 rej.reason = cpu_to_le16(0);
5115 l2cap_send_cmd(ampwork->conn, ampwork->cmd.ident,
5116 L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5117 }
5118
5119 kfree_skb(ampwork->skb);
5120 kfree(ampwork);
5121}
5122
/* Called when AMP physical link setup (or selection) completes.
 *
 * @result:    L2CAP_CREATE_CHAN_SUCCESS / L2CAP_MOVE_CHAN_SUCCESS on
 *             success, otherwise an error code (-EINVAL indicates a
 *             controller problem).
 * @local_id:  local AMP controller id.
 * @remote_id: remote AMP controller id.
 * @sk:        the channel's socket.
 *
 * Depending on socket state this either finishes creating a new channel
 * on the AMP (incoming or outgoing), advances an in-progress channel
 * move, or aborts the move and resumes normal ERTM transmission.
 * Runs with the socket lock held for the duration.
 */
void l2cap_amp_physical_complete(int result, u8 local_id, u8 remote_id,
		struct sock *sk)
{
	struct l2cap_pinfo *pi;

	BT_DBG("result %d, local_id %d, remote_id %d, sk %p", result,
		(int) local_id, (int) remote_id, sk);

	lock_sock(sk);

	/* Channel is going away; nothing to complete */
	if (sk->sk_state == BT_DISCONN || sk->sk_state == BT_CLOSED) {
		release_sock(sk);
		return;
	}

	pi = l2cap_pi(sk);

	if (sk->sk_state != BT_CONNECTED) {
		/* Channel creation (not a move) is being completed */
		if (bt_sk(sk)->parent) {
			struct l2cap_conn_rsp rsp;
			char buf[128];
			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);

			/* Incoming channel on AMP */
			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Send successful response */
				rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			} else {
				/* Send negative response */
				rsp.result = cpu_to_le16(L2CAP_CR_NO_MEM);
				rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			}

			l2cap_send_cmd(pi->conn, pi->ident,
					L2CAP_CREATE_CHAN_RSP,
					sizeof(rsp), &rsp);

			if (result == L2CAP_CREATE_CHAN_SUCCESS) {
				/* Proceed straight to configuration */
				sk->sk_state = BT_CONFIG;
				pi->conf_state |= L2CAP_CONF_REQ_SENT;
				l2cap_send_cmd(pi->conn,
						l2cap_get_ident(pi->conn),
						L2CAP_CONF_REQ,
						l2cap_build_conf_req(sk, buf), buf);
				l2cap_pi(sk)->num_conf_req++;
			}
		} else {
			/* Outgoing channel on AMP */
			if (result != L2CAP_CREATE_CHAN_SUCCESS) {
				/* Revert to BR/EDR connect */
				l2cap_send_conn_req(sk);
			} else {
				pi->amp_id = local_id;
				l2cap_send_create_chan_req(sk, remote_id);
			}
		}
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
		pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
		/* We initiated the move; ask the peer to move the channel */
		l2cap_amp_move_setup(sk);
		pi->amp_move_id = local_id;
		pi->amp_move_state = L2CAP_AMP_STATE_WAIT_MOVE_RSP;

		l2cap_send_move_chan_req(pi->conn, pi, pi->scid, remote_id);
		l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
	} else if (result == L2CAP_MOVE_CHAN_SUCCESS &&
			pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
		/* Peer initiated the move; admit a logical link on the
		 * chosen controller using wide-open default flow specs.
		 */
		struct hci_chan *chan;
		struct l2cap_conf_ext_fs default_fs = {1, 1, 0xFFFF,
				0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF};
		pi->remote_fs = default_fs;
		pi->local_fs = default_fs;
		chan = l2cap_chan_admit(local_id, pi);
		if (chan) {
			if (chan->state == BT_CONNECTED) {
				/* Logical link is ready to go */
				pi->ampchan = chan;
				pi->ampcon = chan->conn;
				pi->ampcon->l2cap_data = pi->conn;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			} else {
				/* Wait for logical link to be ready */
				chan->l2cap_sk = sk;
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM;
			}
		} else {
			/* Logical link not available */
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}
	} else {
		/* Physical link failed or the role/result combination is
		 * unexpected: abort the move and resume traffic.
		 */
		BT_DBG("result %d, role %d, local_busy %d", result,
			(int) pi->amp_move_role,
			(int) ((pi->conn_state & L2CAP_CONN_LOCAL_BUSY) != 0));

		if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			if (result == -EINVAL)
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_CONTROLLER);
			else
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_REFUSED_NOT_ALLOWED);
		}

		pi->amp_move_role = L2CAP_AMP_MOVE_NONE;
		pi->amp_move_state = L2CAP_AMP_STATE_STABLE;

		/* Clear local busy if receive buffer space has freed up */
		if ((l2cap_pi(sk)->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
			l2cap_rmem_available(sk))
			l2cap_ertm_tx(sk, 0, 0,
					L2CAP_ERTM_EVENT_LOCAL_BUSY_CLEAR);

		/* Restart data transmission */
		l2cap_ertm_send(sk);
	}

	release_sock(sk);
}
5250
/* Completion handler for AMP logical link setup (status == 0 on success).
 *
 * On success the logical link is attached to the channel and either the
 * initial channel configuration is finished (channel creation on AMP) or
 * the channel-move state machine is advanced according to the local role.
 * On failure the channel is disconnected (if not yet connected) or the
 * move is reverted/aborted.  Always returns 0.
 */
int l2cap_logical_link_complete(struct hci_chan *chan, u8 status)
{
	struct l2cap_pinfo *pi;
	struct sock *sk;

	/* NOTE(review): chan is dereferenced here and just below, before
	 * the "chan != NULL" test further down; callers appear to always
	 * pass a valid chan - confirm.
	 */
	BT_DBG("status %d, chan %p, conn %p", (int) status, chan, chan->conn);

	sk = chan->l2cap_sk;

	BT_DBG("sk %p", sk);

	lock_sock(sk);

	/* Ignore completions for channels that are neither connected nor
	 * associated with an AMP controller.
	 */
	if (sk->sk_state != BT_CONNECTED && !l2cap_pi(sk)->amp_id) {
		release_sock(sk);
		return 0;
	}

	pi = l2cap_pi(sk);

	if ((!status) && (chan != NULL)) {
		/* Logical link is up: attach it to the channel */
		pi->ampchan = chan;
		pi->ampcon = chan->conn;
		pi->ampcon->l2cap_data = pi->conn;

		if (sk->sk_state != BT_CONNECTED) {
			/* Channel creation on AMP: complete the pending
			 * configuration exchange now that the link exists.
			 */
			struct l2cap_conf_rsp rsp;

			/* Must use spinlock to prevent concurrent
			 * execution of l2cap_config_rsp()
			 */
			bh_lock_sock(sk);
			l2cap_send_cmd(pi->conn, pi->conf_ident, L2CAP_CONF_RSP,
					l2cap_build_conf_rsp(sk, &rsp,
						L2CAP_CONF_SUCCESS, 0), &rsp);
			pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;

			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
				set_default_fcs(l2cap_pi(sk));

				sk->sk_state = BT_CONNECTED;

				if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM ||
					l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
					l2cap_ertm_init(sk);

				l2cap_chan_ready(sk);
			}
			bh_unlock_sock(sk);
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) {
			/* Move confirm will be sent after a success
			 * response is received
			 */
			pi->amp_move_state =
				L2CAP_AMP_STATE_WAIT_MOVE_RSP_SUCCESS;
		} else if (pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM) {
			/* Logical link was the last missing piece of the
			 * move: advance the state machine per our role.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_LOCAL_BUSY;
			else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_INITIATOR) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM_RSP;
				l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_SUCCESS);
				l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
			} else if (pi->amp_move_role ==
					L2CAP_AMP_MOVE_RESPONDER) {
				pi->amp_move_state =
					L2CAP_AMP_STATE_WAIT_MOVE_CONFIRM;
				l2cap_send_move_chan_rsp(pi->conn,
					pi->amp_move_cmd_ident, pi->dcid,
					L2CAP_MOVE_CHAN_SUCCESS);
			}
		} else {
			/* Move was not in expected state, free the
			 * logical link
			 */
			hci_chan_put(pi->ampchan);
			pi->ampcon = NULL;
			pi->ampchan = NULL;
		}
	} else {
		/* Logical link setup failed. */

		if (sk->sk_state != BT_CONNECTED)
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
			/* Refuse the move and stay on the current link */
			l2cap_amp_move_revert(sk);
			l2cap_pi(sk)->amp_move_role = L2CAP_AMP_MOVE_NONE;
			pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			l2cap_send_move_chan_rsp(pi->conn,
				pi->amp_move_cmd_ident, pi->dcid,
				L2CAP_MOVE_CHAN_REFUSED_CONFIG);
		} else if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
			if ((pi->amp_move_state ==
				L2CAP_AMP_STATE_WAIT_LOGICAL_COMPLETE) ||
				(pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_LOGICAL_CONFIRM)) {
				/* Remote has only sent pending or
				 * success responses, clean up
				 */
				l2cap_amp_move_revert(sk);
				l2cap_pi(sk)->amp_move_role =
					L2CAP_AMP_MOVE_NONE;
				pi->amp_move_state = L2CAP_AMP_STATE_STABLE;
			}

			/* Other amp move states imply that the move
			 * has already aborted
			 */
			l2cap_send_move_chan_cfm(pi->conn, pi, pi->scid,
						L2CAP_MOVE_CHAN_UNCONFIRMED);
			l2cap_sock_set_timer(sk, L2CAP_MOVE_TIMEOUT);
		}

		pi->ampcon = NULL;
		pi->ampchan = NULL;
	}

	release_sock(sk);
	return 0;
}
5376
5377static void l2cap_logical_link_worker(struct work_struct *work)
5378{
5379 struct l2cap_logical_link_work *log_link_work =
5380 container_of(work, struct l2cap_logical_link_work, work);
5381
5382 l2cap_logical_link_complete(log_link_work->chan, log_link_work->status);
5383 kfree(log_link_work);
5384}
5385
5386static int l2cap_create_cfm(struct hci_chan *chan, u8 status)
5387{
5388 struct l2cap_logical_link_work *amp_work;
5389
5390 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5391 if (!amp_work)
5392 return -ENOMEM;
5393
5394 INIT_WORK(&amp_work->work, l2cap_logical_link_worker);
5395 amp_work->chan = chan;
5396 amp_work->status = status;
5397 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5398 kfree(amp_work);
5399 return -ENOMEM;
5400 }
5401
5402 return 0;
5403}
5404
/* HCI callback for a modified AMP logical link.  Currently only logs the
 * event; restoring the previous flow spec on failure is still TODO.
 * Always returns 0.
 */
int l2cap_modify_cfm(struct hci_chan *chan, u8 status)
{
	struct l2cap_conn *conn = chan->conn->l2cap_data;

	BT_DBG("chan %p conn %p status %d", chan, conn, status);

	/* TODO: if failed status restore previous fs */
	return 0;
}
5414
5415int l2cap_destroy_cfm(struct hci_chan *chan, u8 reason)
5416{
5417 struct l2cap_chan_list *l;
5418 struct l2cap_conn *conn = chan->conn->l2cap_data;
5419 struct sock *sk;
5420
5421 BT_DBG("chan %p conn %p", chan, conn);
5422
5423 if (!conn)
5424 return 0;
5425
5426 l = &conn->chan_list;
5427
5428 read_lock(&l->lock);
5429
5430 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
5431 bh_lock_sock(sk);
5432 /* TODO MM/PK - What to do if connection is LOCAL_BUSY? */
5433 if (l2cap_pi(sk)->ampchan == chan) {
5434 l2cap_pi(sk)->ampchan = NULL;
5435 l2cap_amp_move_init(sk);
5436 }
5437 bh_unlock_sock(sk);
5438 }
5439
5440 read_unlock(&l->lock);
5441
5442 return 0;
5443
5444
5445}
5446
5447static int l2cap_sig_amp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd,
5448 u8 *data, struct sk_buff *skb)
5449{
5450 struct l2cap_amp_signal_work *amp_work;
5451
5452 amp_work = kzalloc(sizeof(*amp_work), GFP_ATOMIC);
5453 if (!amp_work)
5454 return -ENOMEM;
5455
5456 INIT_WORK(&amp_work->work, l2cap_amp_signal_worker);
5457 amp_work->conn = conn;
5458 amp_work->cmd = *cmd;
5459 amp_work->data = data;
5460 amp_work->skb = skb_clone(skb, GFP_ATOMIC);
5461 if (!amp_work->skb) {
5462 kfree(amp_work);
5463 return -ENOMEM;
5464 }
5465
5466 if (!queue_work(_l2cap_wq, &amp_work->work)) {
5467 kfree_skb(amp_work->skb);
5468 kfree(amp_work);
5469 return -ENOMEM;
5470 }
5471
5472 return 0;
5473}
5474
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005475static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
Claudio Takahaside731152011-02-11 19:28:55 -02005476 u16 to_multiplier)
5477{
5478 u16 max_latency;
5479
5480 if (min > max || min < 6 || max > 3200)
5481 return -EINVAL;
5482
5483 if (to_multiplier < 10 || to_multiplier > 3200)
5484 return -EINVAL;
5485
5486 if (max >= to_multiplier * 8)
5487 return -EINVAL;
5488
5489 max_latency = (to_multiplier * 8 / max) - 1;
5490 if (latency > 499 || latency > max_latency)
5491 return -EINVAL;
5492
5493 return 0;
5494}
5495
5496static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
5497 struct l2cap_cmd_hdr *cmd, u8 *data)
5498{
5499 struct hci_conn *hcon = conn->hcon;
5500 struct l2cap_conn_param_update_req *req;
5501 struct l2cap_conn_param_update_rsp rsp;
5502 u16 min, max, latency, to_multiplier, cmd_len;
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005503 int err;
Claudio Takahaside731152011-02-11 19:28:55 -02005504
5505 if (!(hcon->link_mode & HCI_LM_MASTER))
5506 return -EINVAL;
5507
5508 cmd_len = __le16_to_cpu(cmd->len);
5509 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
5510 return -EPROTO;
5511
5512 req = (struct l2cap_conn_param_update_req *) data;
Gustavo F. Padovane2174ca2011-02-17 19:16:55 -03005513 min = __le16_to_cpu(req->min);
5514 max = __le16_to_cpu(req->max);
Claudio Takahaside731152011-02-11 19:28:55 -02005515 latency = __le16_to_cpu(req->latency);
5516 to_multiplier = __le16_to_cpu(req->to_multiplier);
5517
5518 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
5519 min, max, latency, to_multiplier);
5520
5521 memset(&rsp, 0, sizeof(rsp));
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005522
5523 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
5524 if (err)
Claudio Takahaside731152011-02-11 19:28:55 -02005525 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
5526 else
5527 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
5528
5529 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
5530 sizeof(rsp), &rsp);
5531
Claudio Takahasi2ce603e2011-02-16 20:44:53 -02005532 if (!err)
5533 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
5534
Claudio Takahaside731152011-02-11 19:28:55 -02005535 return 0;
5536}
5537
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005538static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005539 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data,
5540 struct sk_buff *skb)
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005541{
5542 int err = 0;
5543
5544 switch (cmd->code) {
5545 case L2CAP_COMMAND_REJ:
5546 l2cap_command_rej(conn, cmd, data);
5547 break;
5548
5549 case L2CAP_CONN_REQ:
5550 err = l2cap_connect_req(conn, cmd, data);
5551 break;
5552
5553 case L2CAP_CONN_RSP:
5554 err = l2cap_connect_rsp(conn, cmd, data);
5555 break;
5556
5557 case L2CAP_CONF_REQ:
5558 err = l2cap_config_req(conn, cmd, cmd_len, data);
5559 break;
5560
5561 case L2CAP_CONF_RSP:
5562 err = l2cap_config_rsp(conn, cmd, data);
5563 break;
5564
5565 case L2CAP_DISCONN_REQ:
5566 err = l2cap_disconnect_req(conn, cmd, data);
5567 break;
5568
5569 case L2CAP_DISCONN_RSP:
5570 err = l2cap_disconnect_rsp(conn, cmd, data);
5571 break;
5572
5573 case L2CAP_ECHO_REQ:
5574 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
5575 break;
5576
5577 case L2CAP_ECHO_RSP:
5578 break;
5579
5580 case L2CAP_INFO_REQ:
5581 err = l2cap_information_req(conn, cmd, data);
5582 break;
5583
5584 case L2CAP_INFO_RSP:
5585 err = l2cap_information_rsp(conn, cmd, data);
5586 break;
5587
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005588 case L2CAP_CREATE_CHAN_REQ:
5589 err = l2cap_create_channel_req(conn, cmd, data);
5590 break;
5591
5592 case L2CAP_CREATE_CHAN_RSP:
5593 err = l2cap_create_channel_rsp(conn, cmd, data);
5594 break;
5595
5596 case L2CAP_MOVE_CHAN_REQ:
5597 case L2CAP_MOVE_CHAN_RSP:
5598 case L2CAP_MOVE_CHAN_CFM:
5599 case L2CAP_MOVE_CHAN_CFM_RSP:
5600 err = l2cap_sig_amp(conn, cmd, data, skb);
5601 break;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005602 default:
5603 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
5604 err = -EINVAL;
5605 break;
5606 }
5607
5608 return err;
5609}
5610
5611static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
5612 struct l2cap_cmd_hdr *cmd, u8 *data)
5613{
5614 switch (cmd->code) {
5615 case L2CAP_COMMAND_REJ:
5616 return 0;
5617
5618 case L2CAP_CONN_PARAM_UPDATE_REQ:
Claudio Takahaside731152011-02-11 19:28:55 -02005619 return l2cap_conn_param_update_req(conn, cmd, data);
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005620
5621 case L2CAP_CONN_PARAM_UPDATE_RSP:
5622 return 0;
5623
5624 default:
5625 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
5626 return -EINVAL;
5627 }
5628}
5629
5630static inline void l2cap_sig_channel(struct l2cap_conn *conn,
5631 struct sk_buff *skb)
Linus Torvalds1da177e2005-04-16 15:20:36 -07005632{
5633 u8 *data = skb->data;
5634 int len = skb->len;
5635 struct l2cap_cmd_hdr cmd;
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005636 int err;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005637
5638 l2cap_raw_recv(conn, skb);
5639
5640 while (len >= L2CAP_CMD_HDR_SIZE) {
Al Viro88219a02007-07-29 00:17:25 -07005641 u16 cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005642 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
5643 data += L2CAP_CMD_HDR_SIZE;
5644 len -= L2CAP_CMD_HDR_SIZE;
5645
Al Viro88219a02007-07-29 00:17:25 -07005646 cmd_len = le16_to_cpu(cmd.len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005647
Al Viro88219a02007-07-29 00:17:25 -07005648 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005649
Al Viro88219a02007-07-29 00:17:25 -07005650 if (cmd_len > len || !cmd.ident) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07005651 BT_DBG("corrupted command");
5652 break;
5653 }
5654
Claudio Takahasi3300d9a2011-02-11 19:28:54 -02005655 if (conn->hcon->type == LE_LINK)
5656 err = l2cap_le_sig_cmd(conn, &cmd, data);
5657 else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005658 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len,
5659 data, skb);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005660
5661 if (err) {
5662 struct l2cap_cmd_rej rej;
Gustavo F. Padovan2c6d1a22011-03-23 14:38:32 -03005663
5664 BT_ERR("Wrong link type (%d)", err);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005665
5666 /* FIXME: Map err to a valid reason */
YOSHIFUJI Hideakiaca31922007-03-25 20:12:50 -07005667 rej.reason = cpu_to_le16(0);
Linus Torvalds1da177e2005-04-16 15:20:36 -07005668 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
5669 }
5670
Al Viro88219a02007-07-29 00:17:25 -07005671 data += cmd_len;
5672 len -= cmd_len;
Linus Torvalds1da177e2005-04-16 15:20:36 -07005673 }
5674
5675 kfree_skb(skb);
5676}
5677
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005678static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005679{
5680 u16 our_fcs, rcv_fcs;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005681 int hdr_size;
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005682
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005683 if (pi->extended_control)
5684 hdr_size = L2CAP_EXTENDED_HDR_SIZE;
5685 else
5686 hdr_size = L2CAP_ENHANCED_HDR_SIZE;
5687
5688 if (pi->fcs == L2CAP_FCS_CRC16) {
5689 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005690 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
5691 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
5692
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005693 if (our_fcs != rcv_fcs) {
5694 BT_DBG("Bad FCS");
João Paulo Rechi Vita7a560e52010-06-22 13:56:27 -03005695 return -EBADMSG;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005696 }
Gustavo F. Padovanfcc203c2009-08-20 22:26:02 -03005697 }
5698 return 0;
5699}
5700
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005701static void l2cap_ertm_pass_to_tx(struct sock *sk,
5702 struct bt_l2cap_control *control)
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005703{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005704 BT_DBG("sk %p, control %p", sk, control);
5705 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_REQSEQ_AND_FBIT);
Gustavo F. Padovand5392c82010-05-01 16:15:36 -03005706}
5707
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005708static void l2cap_ertm_pass_to_tx_fbit(struct sock *sk,
5709 struct bt_l2cap_control *control)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005710{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005711 BT_DBG("sk %p, control %p", sk, control);
5712 l2cap_ertm_tx(sk, control, 0, L2CAP_ERTM_EVENT_RECV_FBIT);
5713}
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005714
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005715static void l2cap_ertm_resend(struct sock *sk)
5716{
5717 struct bt_l2cap_control control;
5718 struct l2cap_pinfo *pi;
5719 struct sk_buff *skb;
5720 struct sk_buff *tx_skb;
5721 u16 seq;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005722
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005723 BT_DBG("sk %p", sk);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005724
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005725 pi = l2cap_pi(sk);
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005726
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005727 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
5728 return;
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03005729
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005730 if (pi->amp_move_state != L2CAP_AMP_STATE_STABLE &&
5731 pi->amp_move_state != L2CAP_AMP_STATE_WAIT_PREPARE)
5732 return;
João Paulo Rechi Vitabfbacc12010-05-31 18:35:44 -03005733
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005734 while (pi->retrans_list.head != L2CAP_SEQ_LIST_CLEAR) {
5735 seq = l2cap_seq_list_pop(&pi->retrans_list);
5736
5737 skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), seq);
5738 if (!skb) {
5739 BT_DBG("Error: Can't retransmit seq %d, frame missing",
5740 (int) seq);
5741 continue;
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005742 }
5743
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005744 bt_cb(skb)->retries += 1;
5745 control = bt_cb(skb)->control;
5746
5747 if ((pi->max_tx != 0) && (bt_cb(skb)->retries > pi->max_tx)) {
5748 BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
5749 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
5750 l2cap_seq_list_clear(&pi->retrans_list);
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005751 break;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005752 }
5753
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005754 control.reqseq = pi->buffer_seq;
5755 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
5756 control.final = 1;
5757 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
5758 } else {
5759 control.final = 0;
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005760 }
5761
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005762 if (skb_cloned(skb)) {
5763 /* Cloned sk_buffs are read-only, so we need a
5764 * writeable copy
5765 */
5766 tx_skb = skb_copy(skb, GFP_ATOMIC);
5767 } else {
5768 tx_skb = skb_clone(skb, GFP_ATOMIC);
5769 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005770
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005771 /* Update skb contents */
5772 if (pi->extended_control) {
5773 put_unaligned_le32(__pack_extended_control(&control),
5774 tx_skb->data + L2CAP_HDR_SIZE);
5775 } else {
5776 put_unaligned_le16(__pack_enhanced_control(&control),
5777 tx_skb->data + L2CAP_HDR_SIZE);
5778 }
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005779
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005780 if (pi->fcs == L2CAP_FCS_CRC16)
5781 apply_fcs(tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005782
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005783 tx_skb->sk = sk;
5784 tx_skb->destructor = l2cap_skb_destructor;
5785 atomic_inc(&pi->ertm_queued);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005786
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005787 l2cap_do_send(sk, tx_skb);
Gustavo F. Padovan18778a62010-05-01 16:15:44 -03005788
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005789 BT_DBG("Resent txseq %d", (int)control.txseq);
Gustavo F. Padovan1890d362010-05-01 16:15:44 -03005790
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005791 pi->last_acked_seq = pi->buffer_seq;
Mat Martineauc0675b82011-07-07 09:39:02 -07005792 }
5793}
5794
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005795static inline void l2cap_ertm_retransmit(struct sock *sk,
5796 struct bt_l2cap_control *control)
Gustavo F. Padovanc74e5602009-08-20 22:25:58 -03005797{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005798 BT_DBG("sk %p, control %p", sk, control);
5799
5800 l2cap_seq_list_append(&l2cap_pi(sk)->retrans_list, control->reqseq);
5801 l2cap_ertm_resend(sk);
5802}
5803
/* Queue all unacked I-frames starting at control->reqseq for
 * retransmission (REJ handling).  Sets the F-bit for the next
 * transmission when the peer polled; defers entirely while the remote
 * side is busy.
 */
static void l2cap_ertm_retransmit_all(struct sock *sk,
			struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	if (control->poll)
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;

	/* Rebuild the retransmission list from scratch */
	l2cap_seq_list_clear(&pi->retrans_list);

	if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
		return;

	if (pi->unacked_frames) {
		/* First walk: skip frames the peer already acked, stopping
		 * at reqseq or at the send head.
		 */
		skb_queue_walk(TX_QUEUE(sk), skb) {
			if ((bt_cb(skb)->control.txseq == control->reqseq) ||
				skb == sk->sk_send_head)
				break;
		}

		/* Second walk: queue everything from there up to (but not
		 * including) the send head.
		 */
		skb_queue_walk_from(TX_QUEUE(sk), skb) {
			if (skb == sk->sk_send_head)
				break;

			l2cap_seq_list_append(&pi->retrans_list,
					bt_cb(skb)->control.txseq);
		}

		l2cap_ertm_resend(sk);
	}
}
5840
/* Chain new_frag onto skb's fragment list and update skb's length
 * accounting.  *last_frag tracks the current tail so repeated appends
 * stay O(1); it is updated to point at new_frag.
 */
static inline void append_skb_frag(struct sk_buff *skb,
			struct sk_buff *new_frag, struct sk_buff **last_frag)
{
	/* skb->len reflects data in skb as well as all fragments
	   skb->data_len reflects only data in fragments
	 */
	BT_DBG("skb %p, new_frag %p, *last_frag %p", skb, new_frag, *last_frag);

	/* First fragment also becomes the head of the frag_list */
	if (!skb_has_frag_list(skb))
		skb_shinfo(skb)->frag_list = new_frag;

	new_frag->next = NULL;

	(*last_frag)->next = new_frag;
	*last_frag = new_frag;

	skb->len += new_frag->len;
	skb->data_len += new_frag->len;
	skb->truesize += new_frag->truesize;
}
5861
/* Deliver an in-sequence I-frame to the socket, reassembling segmented
 * SDUs according to the control field's SAR bits.
 *
 * On success, ownership of skb passes to the socket receive queue or to
 * the partial SDU under reassembly (skb is set to NULL once adopted).
 * On any error the partial SDU and the skb are freed.  May push the
 * channel into local-busy state when the receive buffer fills up.
 * Returns 0 on success or a negative errno.
 */
static int l2cap_ertm_rx_expected_iframe(struct sock *sk,
		struct bt_l2cap_control *control, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = -EINVAL;

	BT_DBG("sk %p, control %p, skb %p len %d truesize %d", sk, control,
		skb, skb->len, skb->truesize);

	if (!control)
		return err;

	pi = l2cap_pi(sk);

	BT_DBG("type %c, sar %d, txseq %d, reqseq %d, final %d",
		control->frame_type, control->sar, control->txseq,
		control->reqseq, control->final);

	switch (control->sar) {
	case L2CAP_SAR_UNSEGMENTED:
		/* Complete SDU in one frame; discard any half-built SDU */
		if (pi->sdu) {
			BT_DBG("Unexpected unsegmented PDU during reassembly");
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}

		BT_DBG("Unsegmented");
		err = sock_queue_rcv_skb(sk, skb);
		break;

	case L2CAP_SAR_START:
		if (pi->sdu) {
			BT_DBG("Unexpected start PDU during reassembly");
			kfree_skb(pi->sdu);
		}

		/* First two payload bytes carry the total SDU length */
		pi->sdu_len = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);

		if (pi->sdu_len > pi->imtu) {
			err = -EMSGSIZE;
			break;
		}

		/* A start fragment must be strictly shorter than the
		 * announced SDU length; otherwise err stays -EINVAL.
		 */
		if (skb->len >= pi->sdu_len)
			break;

		pi->sdu = skb;
		pi->sdu_last_frag = skb;

		BT_DBG("Start");

		/* skb is now owned by the reassembly buffer */
		skb = NULL;
		err = 0;
		break;

	case L2CAP_SAR_CONTINUE:
		/* Continuation without a start frame is an error */
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* Reassembled data must still be short of sdu_len here */
		if (pi->sdu->len >= pi->sdu_len)
			break;

		BT_DBG("Continue, reassembled %d", pi->sdu->len);

		err = 0;
		break;

	case L2CAP_SAR_END:
		/* End without a start frame is an error */
		if (!pi->sdu)
			break;

		append_skb_frag(pi->sdu, skb,
				&pi->sdu_last_frag);
		skb = NULL;

		/* Final length must exactly match the announced sdu_len */
		if (pi->sdu->len != pi->sdu_len)
			break;

		BT_DBG("End, reassembled %d", pi->sdu->len);
		/* If the sender used tiny PDUs, the rcv queuing could fail.
		 * Applications that have issues here should use a larger
		 * sk_rcvbuf.
		 */
		err = sock_queue_rcv_skb(sk, pi->sdu);

		if (!err) {
			/* Reassembly complete */
			pi->sdu = NULL;
			pi->sdu_last_frag = NULL;
			pi->sdu_len = 0;
		}
		break;

	default:
		BT_DBG("Bad SAR value");
		break;
	}

	if (err) {
		/* Drop both the partial SDU and, if not yet adopted, skb */
		BT_DBG("Reassembly error %d, sk_rcvbuf %d, sk_rmem_alloc %d",
			err, sk->sk_rcvbuf, atomic_read(&sk->sk_rmem_alloc));
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;
		if (skb)
			kfree_skb(skb);
	}

	/* Update local busy state */
	if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) && l2cap_rmem_full(sk))
		l2cap_ertm_tx(sk, 0, 0, L2CAP_ERTM_EVENT_LOCAL_BUSY_DETECTED);

	return err;
}
5986
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005987static int l2cap_ertm_rx_queued_iframes(struct sock *sk)
Gustavo F. Padovan8f171542009-08-20 22:26:03 -03005988{
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03005989 int err = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005990 /* Pass sequential frames to l2cap_ertm_rx_expected_iframe()
5991 * until a gap is encountered.
5992 */
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03005993
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005994 struct l2cap_pinfo *pi;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03005995
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005996 BT_DBG("sk %p", sk);
5997 pi = l2cap_pi(sk);
5998
5999 while (l2cap_rmem_available(sk)) {
6000 struct sk_buff *skb;
6001 BT_DBG("Searching for skb with txseq %d (queue len %d)",
6002 (int) pi->buffer_seq, skb_queue_len(SREJ_QUEUE(sk)));
6003
6004 skb = l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), pi->buffer_seq);
6005
6006 if (!skb)
6007 break;
6008
6009 skb_unlink(skb, SREJ_QUEUE(sk));
6010 pi->buffer_seq = __next_seq(pi->buffer_seq, pi);
6011 err = l2cap_ertm_rx_expected_iframe(sk,
6012 &bt_cb(skb)->control, skb);
6013 if (err)
6014 break;
Gustavo F. Padovan1d8f5d12010-05-01 16:15:37 -03006015 }
6016
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006017 if (skb_queue_empty(SREJ_QUEUE(sk))) {
6018 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6019 l2cap_ertm_send_ack(sk);
João Paulo Rechi Vita9b533502010-05-01 16:15:44 -03006020 }
6021
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07006022 return err;
Gustavo F. Padovan1c2acff2009-08-20 22:25:57 -03006023}
6024
/* Handle a received SREJ (Selective Reject) S-frame.
 *
 * The peer requests retransmission of the single I-frame with sequence
 * number control->reqseq.  A reqseq equal to next_tx_seq refers to a
 * frame that was never sent, and exceeding max_tx retries is fatal;
 * both cases disconnect the channel.
 */
static void l2cap_ertm_handle_srej(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	if (control->reqseq == pi->next_tx_seq) {
		/* Peer asked for a frame that was never transmitted */
		BT_DBG("Invalid reqseq %d, disconnecting",
			(int) control->reqseq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);

	if (skb == NULL) {
		/* Frame already acked and released; nothing to resend */
		BT_DBG("Seq %d not available for retransmission",
			(int) control->reqseq);
		return;
	}

	if ((pi->max_tx != 0) && (bt_cb(skb)->retries >= pi->max_tx)) {
		BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	if (control->poll) {
		l2cap_ertm_pass_to_tx(sk, control);

		/* A poll requires the F-bit set on our response */
		pi->conn_state |= L2CAP_CONN_SEND_FBIT;
		l2cap_ertm_retransmit(sk, control);
		l2cap_ertm_send(sk);

		if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
			/* Remember this SREJ so the final-bit copy of it
			 * is not retransmitted a second time.
			 */
			pi->conn_state |= L2CAP_CONN_SREJ_ACT;
			pi->srej_save_reqseq = control->reqseq;
		}
	} else {
		l2cap_ertm_pass_to_tx_fbit(sk, control);

		if (control->final) {
			/* Skip the retransmit if this SREJ was already
			 * acted on while answering an earlier poll.
			 */
			if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
				(pi->srej_save_reqseq == control->reqseq)) {
				pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			} else {
				l2cap_ertm_retransmit(sk, control);
			}
		} else {
			l2cap_ertm_retransmit(sk, control);
			if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F) {
				pi->conn_state |= L2CAP_CONN_SREJ_ACT;
				pi->srej_save_reqseq = control->reqseq;
			}
		}
	}
}
6088
/* Handle a received REJ (Reject) S-frame.
 *
 * The peer requests retransmission of all unacknowledged I-frames
 * starting at control->reqseq.  A reqseq for a never-sent frame, or a
 * frame that has hit the max_tx retry limit, disconnects the channel.
 */
static void l2cap_ertm_handle_rej(struct sock *sk,
				struct bt_l2cap_control *control)
{
	struct l2cap_pinfo *pi;
	struct sk_buff *skb;

	BT_DBG("sk %p, control %p", sk, control);

	pi = l2cap_pi(sk);

	if (control->reqseq == pi->next_tx_seq) {
		/* Peer asked for a frame that was never transmitted */
		BT_DBG("Invalid reqseq %d, disconnecting",
			(int) control->reqseq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	skb = l2cap_ertm_seq_in_queue(TX_QUEUE(sk), control->reqseq);

	if (pi->max_tx && skb && bt_cb(skb)->retries >= pi->max_tx) {
		BT_DBG("Retry limit exceeded (%d)", (int) pi->max_tx);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
		return;
	}

	pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

	l2cap_ertm_pass_to_tx(sk, control);

	if (control->final) {
		/* If this REJ was already acted on when answering a
		 * poll, don't retransmit again.
		 */
		if (pi->conn_state & L2CAP_CONN_REJ_ACT)
			pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
		else
			l2cap_ertm_retransmit_all(sk, control);
	} else {
		l2cap_ertm_retransmit_all(sk, control);
		l2cap_ertm_send(sk);
		if (pi->tx_state == L2CAP_ERTM_TX_STATE_WAIT_F)
			pi->conn_state |= L2CAP_CONN_REJ_ACT;
	}
}
6130
/* Classify the txseq of a received I-frame relative to the receive
 * window and any outstanding SREJs.
 *
 * Returns one of the L2CAP_ERTM_TXSEQ_* classifications used by the
 * rx state machines to decide whether the frame is the expected one,
 * a duplicate, SREJ-related, or invalid (disconnect/ignore).
 */
static u8 l2cap_ertm_classify_txseq(struct sock *sk, u16 txseq)
{
	struct l2cap_pinfo *pi;

	BT_DBG("sk %p, txseq %d", sk, (int)txseq);
	pi = l2cap_pi(sk);

	BT_DBG("last_acked_seq %d, expected_tx_seq %d", (int)pi->last_acked_seq,
		(int)pi->expected_tx_seq);

	if (pi->rx_state == L2CAP_ERTM_RX_STATE_SREJ_SENT) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			/* See notes below regarding "double poll" and
			 * invalid packets.
			 */
			if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
				BT_DBG("Invalid/Ignore - txseq outside "
					"tx window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
			} else {
				BT_DBG("Invalid - bad txseq within tx "
					"window after SREJ sent");
				return L2CAP_ERTM_TXSEQ_INVALID;
			}
		}

		if (pi->srej_list.head == txseq) {
			BT_DBG("Expected SREJ");
			return L2CAP_ERTM_TXSEQ_EXPECTED_SREJ;
		}

		if (l2cap_ertm_seq_in_queue(SREJ_QUEUE(sk), txseq)) {
			BT_DBG("Duplicate SREJ - txseq already stored");
			return L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ;
		}

		if (l2cap_seq_list_contains(&pi->srej_list, txseq)) {
			BT_DBG("Unexpected SREJ - txseq not requested "
				"with SREJ");
			return L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ;
		}
	}

	if (pi->expected_tx_seq == txseq) {
		if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		} else {
			BT_DBG("Expected");
			return L2CAP_ERTM_TXSEQ_EXPECTED;
		}
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) <
		__delta_seq(pi->expected_tx_seq, pi->last_acked_seq, pi)) {
		BT_DBG("Duplicate - expected_tx_seq later than txseq");
		return L2CAP_ERTM_TXSEQ_DUPLICATE;
	}

	if (__delta_seq(txseq, pi->last_acked_seq, pi) >= pi->tx_win) {
		/* A source of invalid packets is a "double poll" condition,
		 * where delays cause us to send multiple poll packets.  If
		 * the remote stack receives and processes both polls,
		 * sequence numbers can wrap around in such a way that a
		 * resent frame has a sequence number that looks like new data
		 * with a sequence gap.  This would trigger an erroneous SREJ
		 * request.
		 *
		 * Fortunately, this is impossible with a tx window that's
		 * less than half of the maximum sequence number, which allows
		 * invalid frames to be safely ignored.
		 *
		 * With tx window sizes greater than half of the tx window
		 * maximum, the frame is invalid and cannot be ignored.  This
		 * causes a disconnect.
		 */

		if (pi->tx_win <= ((pi->tx_win_max + 1) >> 1)) {
			BT_DBG("Invalid/Ignore - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID_IGNORE;
		} else {
			BT_DBG("Invalid - txseq outside tx window");
			return L2CAP_ERTM_TXSEQ_INVALID;
		}
	} else {
		BT_DBG("Unexpected - txseq indicates missing frames");
		return L2CAP_ERTM_TXSEQ_UNEXPECTED;
	}
}
6220
/* ERTM receive state machine: RECV (normal) state.
 *
 * Dispatches I-frames by their txseq classification and handles the
 * RR, RNR, REJ and SREJ S-frame events.  The skb is consumed when it
 * is queued or delivered (tracked via skb_in_use); otherwise it is
 * freed before returning.  Returns 0 or a reassembly error.
 */
static int l2cap_ertm_rx_state_recv(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, control->txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else {
					control->final = 0;
					l2cap_ertm_retransmit_all(sk, control);
					l2cap_ertm_send(sk);
				}
			}

			if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY))
				l2cap_ertm_send_ack(sk);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			l2cap_ertm_pass_to_tx(sk, control);

			/* Can't issue SREJ frames in the local busy state.
			 * Drop this frame, it will be seen as missing
			 * when local busy is exited.
			 */
			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding unexpected seq %d",
					control->txseq);
				break;
			}

			/* There was a gap in the sequence, so an SREJ
			 * must be sent for each missing frame.  The
			 * current frame is stored for later use.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
			l2cap_seq_list_clear(&pi->srej_list);
			l2cap_ertm_send_srej(sk, control->txseq);

			pi->rx_state = L2CAP_ERTM_RX_STATE_SREJ_SENT;
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Already received; only the ack info is useful */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else if (pi->amp_move_state == L2CAP_AMP_STATE_STABLE ||
				pi->amp_move_state ==
					L2CAP_AMP_STATE_WAIT_PREPARE) {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			l2cap_ertm_send_i_or_rr_or_rnr(sk);
		} else {
			/* Remote recovered from busy; resume sending */
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames)
				l2cap_ertm_start_retrans_timer(pi);
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control && control->poll) {
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_rr_or_rnr(sk, 0);
		}
		l2cap_ertm_stop_retrans_timer(pi);
		l2cap_seq_list_clear(&pi->retrans_list);
		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6358
/* ERTM receive state machine: SREJ_SENT state.
 *
 * One or more SREJs are outstanding.  Out-of-order frames are parked
 * on the SREJ queue until the requested retransmissions arrive, at
 * which point l2cap_ertm_rx_queued_iframes() delivers them in order.
 * The skb is consumed when queued (skb_in_use); otherwise freed.
 */
static int l2cap_ertm_rx_state_srej_sent(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	u16 txseq = control->txseq;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		switch (l2cap_ertm_classify_txseq(sk, txseq)) {
		case L2CAP_ERTM_TXSEQ_EXPECTED:
			/* Keep frame for reassembly later */
			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			pi->expected_tx_seq = __next_seq(txseq, pi);
			break;
		case L2CAP_ERTM_TXSEQ_EXPECTED_SREJ:
			/* The head of the SREJ list arrived; queue it
			 * and try to drain the in-order prefix.
			 */
			l2cap_seq_list_pop(&pi->srej_list);

			l2cap_ertm_pass_to_tx(sk, control);
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			err = l2cap_ertm_rx_queued_iframes(sk);
			if (err)
				break;

			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED:
			/* Got a frame that can't be reassembled yet.
			 * Save it for later, and send SREJs to cover
			 * the missing frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_UNEXPECTED_SREJ:
			/* This frame was requested with an SREJ, but
			 * some expected retransmitted frames are
			 * missing.  Request retransmission of missing
			 * SREJ'd frames.
			 */
			skb_queue_tail(SREJ_QUEUE(sk), skb);
			skb_in_use = 1;
			BT_DBG("Queued %p (queue len %d)", skb,
				skb_queue_len(SREJ_QUEUE(sk)));

			l2cap_ertm_pass_to_tx(sk, control);
			l2cap_ertm_send_srej_list(sk, control->txseq);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE_SREJ:
			/* We've already queued this frame.  Drop this copy. */
			l2cap_ertm_pass_to_tx(sk, control);
			break;
		case L2CAP_ERTM_TXSEQ_DUPLICATE:
			/* Expecting a later sequence number, so this frame
			 * was already received.  Ignore it completely.
			 */
			break;
		case L2CAP_ERTM_TXSEQ_INVALID_IGNORE:
			break;
		case L2CAP_ERTM_TXSEQ_INVALID:
		default:
			l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk,
					ECONNRESET);
			break;
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->final) {
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;

			if (pi->conn_state & L2CAP_CONN_REJ_ACT)
				pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
			else {
				control->final = 0;
				l2cap_ertm_retransmit_all(sk, control);
			}

			l2cap_ertm_send(sk);
		} else if (control->poll) {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			pi->conn_state |= L2CAP_CONN_SEND_FBIT;
			l2cap_ertm_send_srej_tail(sk);
		} else {
			if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
				pi->unacked_frames) {
				l2cap_ertm_start_retrans_timer(pi);
			}
			pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
			l2cap_ertm_send_ack(sk);
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RNR:
		pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
		l2cap_ertm_pass_to_tx(sk, control);
		if (control->poll)
			l2cap_ertm_send_srej_tail(sk);
		else {
			/* Acknowledge without requesting more data */
			struct bt_l2cap_control rr_control;
			memset(&rr_control, 0, sizeof(rr_control));
			rr_control.frame_type = 's';
			rr_control.super = L2CAP_SFRAME_RR;
			rr_control.reqseq = pi->buffer_seq;
			l2cap_ertm_send_sframe(sk, &rr_control);
		}

		break;
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_handle_rej(sk, control);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		l2cap_ertm_handle_srej(sk, control);
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6504
/* ERTM receive state machine: AMP_MOVE state.
 *
 * While a channel move is in progress, only expected I-frames are
 * delivered; S-frames merely advance reqseq processing so that the
 * state machine does not change state mid-move.  Unused skbs are
 * freed before returning.
 */
static int l2cap_ertm_rx_state_amp_move(struct sock *sk,
				struct bt_l2cap_control *control,
				struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;
	bool skb_in_use = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d", sk, control, skb,
		(int)event);
	pi = l2cap_pi(sk);

	/* Only handle expected frames, to avoid state changes. */

	switch (event) {
	case L2CAP_ERTM_EVENT_RECV_IFRAME:
		if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
				L2CAP_ERTM_TXSEQ_EXPECTED) {
			l2cap_ertm_pass_to_tx(sk, control);

			if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
				BT_DBG("Busy, discarding expected seq %d",
					control->txseq);
				break;
			}

			pi->expected_tx_seq = __next_seq(control->txseq, pi);
			pi->buffer_seq = pi->expected_tx_seq;
			skb_in_use = 1;

			err = l2cap_ertm_rx_expected_iframe(sk, control, skb);
			if (err)
				break;

			if (control->final) {
				if (pi->conn_state & L2CAP_CONN_REJ_ACT)
					pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
				else
					control->final = 0;
			}
		}
		break;
	case L2CAP_ERTM_EVENT_RECV_RR:
	case L2CAP_ERTM_EVENT_RECV_RNR:
	case L2CAP_ERTM_EVENT_RECV_REJ:
		l2cap_ertm_process_reqseq(sk, control->reqseq);
		break;
	case L2CAP_ERTM_EVENT_RECV_SREJ:
		/* Ignore */
		break;
	default:
		break;
	}

	if (skb && !skb_in_use) {
		BT_DBG("Freeing %p", skb);
		kfree_skb(skb);
	}

	return err;
}
6566
/* Respond to a poll received while waiting out an AMP channel move.
 *
 * Processes frames acknowledged by amp_move_reqseq, rewinds the
 * transmit sequence to what the receiver expects, finishes the move
 * (l2cap_finish_amp_move), then replays the saved poll event through
 * the RECV state machine.  Returns 0 or a negative error.
 */
static int l2cap_answer_move_poll(struct sock *sk)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control control;
	int err = 0;

	BT_DBG("sk %p", sk);

	pi = l2cap_pi(sk);

	l2cap_ertm_process_reqseq(sk, pi->amp_move_reqseq);

	if (!skb_queue_empty(TX_QUEUE(sk)))
		sk->sk_send_head = skb_peek(TX_QUEUE(sk));
	else
		sk->sk_send_head = NULL;

	/* Rewind next_tx_seq to the point expected
	 * by the receiver.
	 */
	pi->next_tx_seq = pi->amp_move_reqseq;
	pi->unacked_frames = 0;

	err = l2cap_finish_amp_move(sk);

	if (err)
		return err;

	pi->conn_state |= L2CAP_CONN_SEND_FBIT;
	l2cap_ertm_send_i_or_rr_or_rnr(sk);

	memset(&control, 0, sizeof(control));
	control.reqseq = pi->amp_move_reqseq;

	/* An I-frame cannot be replayed here (no skb was saved) */
	if (pi->amp_move_event == L2CAP_ERTM_EVENT_RECV_IFRAME)
		err = -EPROTO;
	else
		err = l2cap_ertm_rx_state_recv(sk, &control, NULL,
					pi->amp_move_event);

	return err;
}
6609
6610static void l2cap_amp_move_setup(struct sock *sk)
6611{
6612 struct l2cap_pinfo *pi;
6613 struct sk_buff *skb;
6614
6615 BT_DBG("sk %p", sk);
6616
6617 pi = l2cap_pi(sk);
6618
6619 l2cap_ertm_stop_ack_timer(pi);
6620 l2cap_ertm_stop_retrans_timer(pi);
6621 l2cap_ertm_stop_monitor_timer(pi);
6622
6623 pi->retry_count = 0;
6624 skb_queue_walk(TX_QUEUE(sk), skb) {
6625 if (bt_cb(skb)->retries)
6626 bt_cb(skb)->retries = 1;
6627 else
6628 break;
6629 }
6630
6631 pi->expected_tx_seq = pi->buffer_seq;
6632
6633 pi->conn_state &= ~(L2CAP_CONN_REJ_ACT | L2CAP_CONN_SREJ_ACT);
6634 l2cap_seq_list_clear(&pi->retrans_list);
6635 l2cap_seq_list_clear(&l2cap_pi(sk)->srej_list);
6636 skb_queue_purge(SREJ_QUEUE(sk));
6637
6638 pi->tx_state = L2CAP_ERTM_TX_STATE_XMIT;
6639 pi->rx_state = L2CAP_ERTM_RX_STATE_AMP_MOVE;
6640
6641 BT_DBG("tx_state 0x2.2%x rx_state 0x2.2%x", pi->tx_state,
6642 pi->rx_state);
6643
6644 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
6645}
6646
6647static void l2cap_amp_move_revert(struct sock *sk)
6648{
6649 struct l2cap_pinfo *pi;
6650
6651 BT_DBG("sk %p", sk);
6652
6653 pi = l2cap_pi(sk);
6654
6655 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6656 l2cap_ertm_tx(sk, NULL, NULL, L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6657 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6658 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER)
6659 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_P_FLAG;
6660}
6661
6662static int l2cap_amp_move_reconf(struct sock *sk)
6663{
6664 struct l2cap_pinfo *pi;
6665 u8 buf[64];
6666 int err = 0;
6667
6668 BT_DBG("sk %p", sk);
6669
6670 pi = l2cap_pi(sk);
6671
6672 l2cap_send_cmd(pi->conn, l2cap_get_ident(pi->conn), L2CAP_CONF_REQ,
6673 l2cap_build_amp_reconf_req(sk, buf), buf);
6674 return err;
6675}
6676
6677static void l2cap_amp_move_success(struct sock *sk)
6678{
6679 struct l2cap_pinfo *pi;
6680
6681 BT_DBG("sk %p", sk);
6682
6683 pi = l2cap_pi(sk);
6684
6685 if (pi->amp_move_role == L2CAP_AMP_MOVE_INITIATOR) {
6686 int err = 0;
6687 /* Send reconfigure request */
6688 if (pi->mode == L2CAP_MODE_ERTM) {
6689 pi->reconf_state = L2CAP_RECONF_INT;
6690 if (enable_reconfig)
6691 err = l2cap_amp_move_reconf(sk);
6692
6693 if (err || !enable_reconfig) {
6694 pi->reconf_state = L2CAP_RECONF_NONE;
6695 l2cap_ertm_tx(sk, NULL, NULL,
6696 L2CAP_ERTM_EVENT_EXPLICIT_POLL);
6697 pi->rx_state = L2CAP_ERTM_RX_STATE_WAIT_F_FLAG;
6698 }
6699 } else
6700 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6701 } else if (pi->amp_move_role == L2CAP_AMP_MOVE_RESPONDER) {
6702 if (pi->mode == L2CAP_MODE_ERTM)
6703 pi->rx_state =
6704 L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE;
6705 else
6706 pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
6707 }
6708}
6709
6710static inline bool __valid_reqseq(struct l2cap_pinfo *pi, u16 reqseq)
6711{
6712 /* Make sure reqseq is for a packet that has been sent but not acked */
6713 u16 unacked = __delta_seq(pi->next_tx_seq, pi->expected_ack_seq, pi);
6714 return __delta_seq(pi->next_tx_seq, reqseq, pi) <= unacked;
6715}
6716
/* Streaming-mode receive path.
 *
 * Expected frames are reassembled and delivered; any sequence gap
 * discards the partial SDU and the frame, since streaming mode has
 * no retransmission.  The sequence state always advances to follow
 * the received txseq.  Always returns 0.
 */
static int l2cap_strm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, state %d",
		sk, control, skb, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (l2cap_ertm_classify_txseq(sk, control->txseq) ==
			L2CAP_ERTM_TXSEQ_EXPECTED) {
		l2cap_ertm_pass_to_tx(sk, control);

		BT_DBG("buffer_seq %d->%d", pi->buffer_seq,
			__next_seq(pi->buffer_seq, pi));

		pi->buffer_seq = __next_seq(pi->buffer_seq, pi);

		l2cap_ertm_rx_expected_iframe(sk, control, skb);
	} else {
		/* Gap in the sequence - abandon the partial SDU */
		if (pi->sdu) {
			kfree_skb(pi->sdu);
			pi->sdu = NULL;
		}
		pi->sdu_last_frag = NULL;
		pi->sdu_len = 0;

		if (skb) {
			BT_DBG("Freeing %p", skb);
			kfree_skb(skb);
		}
	}

	pi->last_acked_seq = control->txseq;
	pi->expected_tx_seq = __next_seq(control->txseq, pi);

	return err;
}
6757
/* Top-level ERTM receive dispatcher.
 *
 * Validates control->reqseq against the unacked window and routes the
 * event to the handler for the current rx_state.  Special rx states
 * (AMP move wait-F/wait-P) are resolved inline before delegating to
 * the RECV state machine.  An invalid reqseq disconnects the channel.
 */
static int l2cap_ertm_rx(struct sock *sk, struct bt_l2cap_control *control,
			struct sk_buff *skb, u8 event)
{
	struct l2cap_pinfo *pi;
	int err = 0;

	BT_DBG("sk %p, control %p, skb %p, event %d, state %d",
		sk, control, skb, (int)event, l2cap_pi(sk)->rx_state);

	pi = l2cap_pi(sk);

	if (__valid_reqseq(pi, control->reqseq)) {
		switch (pi->rx_state) {
		case L2CAP_ERTM_RX_STATE_RECV:
			err = l2cap_ertm_rx_state_recv(sk, control, skb, event);
			break;
		case L2CAP_ERTM_RX_STATE_SREJ_SENT:
			err = l2cap_ertm_rx_state_srej_sent(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_AMP_MOVE:
			err = l2cap_ertm_rx_state_amp_move(sk, control, skb,
							event);
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_F_FLAG:
			/* Waiting for the peer's F-bit after a move */
			if (control->final) {
				pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
				pi->amp_move_role = L2CAP_AMP_MOVE_NONE;

				pi->rx_state = L2CAP_ERTM_RX_STATE_RECV;
				l2cap_ertm_process_reqseq(sk, control->reqseq);

				if (!skb_queue_empty(TX_QUEUE(sk)))
					sk->sk_send_head =
						skb_peek(TX_QUEUE(sk));
				else
					sk->sk_send_head = NULL;

				/* Rewind next_tx_seq to the point expected
				 * by the receiver.
				 */
				pi->next_tx_seq = control->reqseq;
				pi->unacked_frames = 0;

				/* MTU changes when moving to/from an AMP
				 * controller.
				 */
				if (pi->ampcon)
					pi->conn->mtu =
						pi->ampcon->hdev->acl_mtu;
				else
					pi->conn->mtu =
						pi->conn->hcon->hdev->acl_mtu;

				err = l2cap_setup_resegment(sk);

				if (err)
					break;

				err = l2cap_ertm_rx_state_recv(sk, control, skb,
							event);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;
				err = l2cap_answer_move_poll(sk);
			}
			break;
		case L2CAP_ERTM_RX_STATE_WAIT_P_FLAG_RECONFIGURE:
			if (control->poll) {
				pi->amp_move_reqseq = control->reqseq;
				pi->amp_move_event = event;

				BT_DBG("amp_move_role 0x%2.2x, "
					"reconf_state 0x%2.2x",
					pi->amp_move_role, pi->reconf_state);

				if (pi->reconf_state == L2CAP_RECONF_ACC)
					err = l2cap_amp_move_reconf(sk);
				else
					err = l2cap_answer_move_poll(sk);
			}
			break;
		default:
			/* shut it down */
			break;
		}
	} else {
		BT_DBG("Invalid reqseq %d (next_tx_seq %d, expected_ack_seq %d",
			control->reqseq, pi->next_tx_seq, pi->expected_ack_seq);
		l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
	}

	return err;
}
6852
6853void l2cap_fixed_channel_config(struct sock *sk, struct l2cap_options *opt)
6854{
6855 lock_sock(sk);
6856
6857 l2cap_pi(sk)->fixed_channel = 1;
6858
6859 l2cap_pi(sk)->imtu = opt->imtu;
6860 l2cap_pi(sk)->omtu = opt->omtu;
6861 l2cap_pi(sk)->remote_mps = opt->omtu;
6862 l2cap_pi(sk)->mps = opt->omtu;
6863 l2cap_pi(sk)->flush_to = opt->flush_to;
6864 l2cap_pi(sk)->mode = opt->mode;
6865 l2cap_pi(sk)->fcs = opt->fcs;
6866 l2cap_pi(sk)->max_tx = opt->max_tx;
6867 l2cap_pi(sk)->remote_max_tx = opt->max_tx;
6868 l2cap_pi(sk)->tx_win = opt->txwin_size;
6869 l2cap_pi(sk)->remote_tx_win = opt->txwin_size;
6870 l2cap_pi(sk)->retrans_timeout = L2CAP_DEFAULT_RETRANS_TO;
6871 l2cap_pi(sk)->monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
6872
6873 if (opt->mode == L2CAP_MODE_ERTM ||
6874 l2cap_pi(sk)->mode == L2CAP_MODE_STREAMING)
6875 l2cap_ertm_init(sk);
6876
6877 release_sock(sk);
6878
6879 return;
6880}
6881
/* Map an S-frame supervisory function code (RR, REJ, RNR, SREJ) to
 * the corresponding ERTM receive event.  Indexed by control->super.
 */
static const u8 l2cap_ertm_rx_func_to_event[4] = {
	L2CAP_ERTM_EVENT_RECV_RR, L2CAP_ERTM_EVENT_RECV_REJ,
	L2CAP_ERTM_EVENT_RECV_RNR, L2CAP_ERTM_EVENT_RECV_SREJ
};
6886
/* Main receive entry point for connection-oriented channel data.
 *
 * For basic mode, queues the payload directly to the socket.  For
 * ERTM/streaming modes, parses the (enhanced or extended) control
 * field, validates FCS, payload length and F/P bits, then dispatches
 * to the appropriate rx state machine.  The skb is consumed on all
 * paths (queued by the rx handlers, or freed via the drop label).
 * Always returns 0.
 */
int l2cap_data_channel(struct sock *sk, struct sk_buff *skb)
{
	struct l2cap_pinfo *pi;
	struct bt_l2cap_control *control;
	u16 len;
	u8 event;
	pi = l2cap_pi(sk);

	BT_DBG("sk %p, len %d, mode %d", sk, skb->len, pi->mode);

	if (sk->sk_state != BT_CONNECTED)
		goto drop;

	switch (pi->mode) {
	case L2CAP_MODE_BASIC:
		/* If socket recv buffers overflows we drop data here
		 * which is *bad* because L2CAP has to be reliable.
		 * But we don't have any other choice. L2CAP doesn't
		 * provide flow control mechanism. */

		if (pi->imtu < skb->len)
			goto drop;

		if (!sock_queue_rcv_skb(sk, skb))
			goto done;
		break;

	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:
		control = &bt_cb(skb)->control;
		if (pi->extended_control) {
			__get_extended_control(get_unaligned_le32(skb->data),
					control);
			skb_pull(skb, 4);
		} else {
			__get_enhanced_control(get_unaligned_le16(skb->data),
					control);
			skb_pull(skb, 2);
		}

		len = skb->len;

		if (l2cap_check_fcs(pi, skb))
			goto drop;

		/* Exclude the SAR length field and FCS trailer from the
		 * payload length check against MPS.
		 */
		if ((control->frame_type == 'i') &&
			(control->sar == L2CAP_SAR_START))
			len -= 2;

		if (pi->fcs == L2CAP_FCS_CRC16)
			len -= 2;

		/*
		 * We can just drop the corrupted I-frame here.
		 * Receiver will miss it and start proper recovery
		 * procedures and ask for retransmission.
		 */
		if (len > pi->mps) {
			l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
			goto drop;
		}

		if (control->frame_type == 'i') {

			int err;

			BT_DBG("iframe sar %d, reqseq %d, final %d, txseq %d",
				control->sar, control->reqseq, control->final,
				control->txseq);

			/* Validate F-bit - F=0 always valid, F=1 only
			 * valid in TX WAIT_F
			 */
			if (control->final && (pi->tx_state !=
					L2CAP_ERTM_TX_STATE_WAIT_F))
				goto drop;

			if (pi->mode != L2CAP_MODE_STREAMING) {
				event = L2CAP_ERTM_EVENT_RECV_IFRAME;
				err = l2cap_ertm_rx(sk, control, skb, event);
			} else
				err = l2cap_strm_rx(sk, control, skb);
			if (err)
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		} else {
			/* Only I-frames are expected in streaming mode */
			if (pi->mode == L2CAP_MODE_STREAMING)
				goto drop;

			BT_DBG("sframe reqseq %d, final %d, poll %d, super %d",
				control->reqseq, control->final, control->poll,
				control->super);

			/* S-frames carry no payload */
			if (len != 0) {
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
				goto drop;
			}

			/* Validate F and P bits */
			if (control->final &&
				((pi->tx_state != L2CAP_ERTM_TX_STATE_WAIT_F)
					|| control->poll))
				goto drop;

			event = l2cap_ertm_rx_func_to_event[control->super];
			if (l2cap_ertm_rx(sk, control, skb, event))
				l2cap_send_disconn_req(pi->conn, sk,
						ECONNRESET);
		}

		goto done;

	default:
		BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
		break;
	}

drop:
	kfree_skb(skb);

done:
	return 0;
}
7012
/* Process a deferred frame with the socket lock held, reusing the
 * normal data-channel receive path.
 */
void l2cap_recv_deferred_frame(struct sock *sk, struct sk_buff *skb)
{
	lock_sock(sk);
	l2cap_data_channel(sk, skb);
	release_sock(sk);
}
7019
/* Deliver a connectionless-channel payload to the socket bound to the
 * given PSM.  The frame is dropped when no socket matches, the socket
 * is in the wrong state, the payload exceeds the incoming MTU, or the
 * receive queue rejects it.  Always returns 0; consumes skb.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
{
	struct sock *sk;

	sk = l2cap_get_sock_by_psm(0, psm, conn->src);
	if (!sk)
		goto drop;

	bh_lock_sock(sk);

	BT_DBG("sk %p, len %d", sk, skb->len);

	if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
		goto drop;

	if (l2cap_pi(sk)->imtu < skb->len)
		goto drop;

	if (!sock_queue_rcv_skb(sk, skb))
		goto done;

drop:
	kfree_skb(skb);

done:
	/* sk is NULL when no bound socket was found */
	if (sk)
		bh_unlock_sock(sk);
	return 0;
}
7049
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007050static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
7051{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007052 struct sock *sk;
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007053
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007054 sk = l2cap_get_sock_by_scid(0, cid, conn->src);
7055 if (!sk)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007056 goto drop;
7057
7058 bh_lock_sock(sk);
7059
7060 BT_DBG("sk %p, len %d", sk, skb->len);
7061
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007062 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007063 goto drop;
7064
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007065 if (l2cap_pi(sk)->imtu < skb->len)
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007066 goto drop;
7067
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007068 if (!sock_queue_rcv_skb(sk, skb))
Gustavo F. Padovan9f69bda2011-04-07 16:40:25 -03007069 goto done;
7070
7071drop:
7072 kfree_skb(skb);
7073
7074done:
7075 if (sk)
7076 bh_unlock_sock(sk);
7077 return 0;
7078}
7079
/* Dispatch one complete, reassembled L2CAP frame to the handler for its
 * destination channel (CID).  Consumes the skb on every path: handlers
 * take ownership, and malformed/unroutable frames are freed here.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
{
	struct l2cap_hdr *lh = (void *) skb->data;
	struct sock *sk;
	u16 cid, len;
	__le16 psm;

	skb_pull(skb, L2CAP_HDR_SIZE);
	cid = __le16_to_cpu(lh->cid);
	len = __le16_to_cpu(lh->len);

	/* Header length must match the actual payload exactly */
	if (len != skb->len) {
		kfree_skb(skb);
		return;
	}

	BT_DBG("len %d, cid 0x%4.4x", len, cid);

	switch (cid) {
	case L2CAP_CID_LE_SIGNALING:
	case L2CAP_CID_SIGNALING:
		l2cap_sig_channel(conn, skb);
		break;

	case L2CAP_CID_CONN_LESS:
		/* Connectionless data carries a 2-byte PSM before the payload */
		psm = get_unaligned_le16(skb->data);
		skb_pull(skb, 2);
		l2cap_conless_channel(conn, psm, skb);
		break;

	case L2CAP_CID_LE_DATA:
		l2cap_att_channel(conn, cid, skb);
		break;

	case L2CAP_CID_SMP:
		/* A failed Security Manager exchange tears down the link */
		if (smp_sig_channel(conn, skb))
			l2cap_conn_del(conn->hcon, EACCES);
		break;

	default:
		/* Dynamic CID: look up the owning socket.
		 * NOTE(review): the lookup appears to return with the socket
		 * bh-locked (we unlock below without locking here) — confirm
		 * against l2cap_get_chan_by_scid's definition. */
		sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
		if (sk) {
			if (sock_owned_by_user(sk)) {
				/* Socket busy in process context: defer via
				 * the backlog; free the skb if that fails. */
				BT_DBG("backlog sk %p", sk);
				if (sk_add_backlog(sk, skb))
					kfree_skb(skb);
			} else
				l2cap_data_channel(sk, skb);

			bh_unlock_sock(sk);
		} else if (cid == L2CAP_CID_A2MP) {
			BT_DBG("A2MP");
			amp_conn_ind(conn, skb);
		} else {
			BT_DBG("unknown cid 0x%4.4x", cid);
			kfree_skb(skb);
		}

		break;
	}
}
7141
7142/* ---- L2CAP interface with lower layer (HCI) ---- */
7143
7144static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
7145{
7146 int exact = 0, lm1 = 0, lm2 = 0;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007147 register struct sock *sk;
7148 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007149
7150 if (type != ACL_LINK)
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007151 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007152
7153 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
7154
7155 /* Find listening sockets and check their link_mode */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007156 read_lock(&l2cap_sk_list.lock);
7157 sk_for_each(sk, node, &l2cap_sk_list.head) {
7158 if (sk->sk_state != BT_LISTEN)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007159 continue;
7160
7161 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007162 lm1 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007163 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007164 lm1 |= HCI_LM_MASTER;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007165 exact++;
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007166 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
7167 lm2 |= HCI_LM_ACCEPT;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007168 if (l2cap_pi(sk)->role_switch)
Marcel Holtmann2af6b9d2009-01-15 21:58:38 +01007169 lm2 |= HCI_LM_MASTER;
7170 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007171 }
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007172 read_unlock(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007173
7174 return exact ? lm1 : lm2;
7175}
7176
7177static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
7178{
Marcel Holtmann01394182006-07-03 10:02:46 +02007179 struct l2cap_conn *conn;
7180
Linus Torvalds1da177e2005-04-16 15:20:36 -07007181 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
7182
Ville Tervoacd7d372011-02-10 22:38:49 -03007183 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007184 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007185
7186 if (!status) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07007187 conn = l2cap_conn_add(hcon, status);
7188 if (conn)
7189 l2cap_conn_ready(conn);
Marcel Holtmann01394182006-07-03 10:02:46 +02007190 } else
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007191 l2cap_conn_del(hcon, bt_err(status));
Linus Torvalds1da177e2005-04-16 15:20:36 -07007192
7193 return 0;
7194}
7195
/* HCI callback: supply the reason code to use when disconnecting this
 * link.  Returns the L2CAP-level reason recorded on the connection, or
 * 0x13 ("Remote User Terminated Connection" in HCI error-code terms)
 * when the link is not a BR/EDR ACL link or has no L2CAP state.
 */
static int l2cap_disconn_ind(struct hci_conn *hcon)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	BT_DBG("hcon %p", hcon);

	if (hcon->type != ACL_LINK || !conn)
		return 0x13;

	return conn->disc_reason;
}
7207
7208static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007209{
7210 BT_DBG("hcon %p reason %d", hcon, reason);
7211
Ville Tervoacd7d372011-02-10 22:38:49 -03007212 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
João Paulo Rechi Vita963cf682010-06-22 13:56:28 -03007213 return -EINVAL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007214
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007215 l2cap_conn_del(hcon, bt_err(reason));
Marcel Holtmann01394182006-07-03 10:02:46 +02007216
Linus Torvalds1da177e2005-04-16 15:20:36 -07007217 return 0;
7218}
7219
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007220static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007221{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007222 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
Marcel Holtmann255c7602009-02-04 21:07:19 +01007223 return;
7224
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007225 if (encrypt == 0x00) {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007226 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
7227 l2cap_sock_clear_timer(sk);
7228 l2cap_sock_set_timer(sk, HZ * 5);
7229 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
7230 __l2cap_sock_close(sk, ECONNREFUSED);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007231 } else {
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007232 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
7233 l2cap_sock_clear_timer(sk);
Marcel Holtmannf62e4322009-01-15 21:58:44 +01007234 }
7235}
7236
/* HCI callback: authentication/encryption completed on @hcon with the
 * given @status and new @encrypt state.  Walk every channel on the
 * connection and advance its state machine accordingly: LE channels get
 * SMP completion handling, outgoing channels (BT_CONNECT) proceed to
 * the connect request or are timed out, and incoming channels
 * (BT_CONNECT2) are answered with a connect response.  Runs in bh
 * context; each socket is individually bh-locked inside the loop.
 */
static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
{
	struct l2cap_chan_list *l;
	struct l2cap_conn *conn = hcon->l2cap_data;
	struct sock *sk;

	if (!conn)
		return 0;

	l = &conn->chan_list;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);

	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		bh_lock_sock(sk);

		BT_DBG("sk->scid %d", l2cap_pi(sk)->scid);

		/* LE data channel: hand the result to SMP and mark the
		 * channel ready.  sec_level is upgraded only on a clean,
		 * encrypted completion. */
		if (l2cap_pi(sk)->scid == L2CAP_CID_LE_DATA) {
			if (!status && encrypt)
				l2cap_pi(sk)->sec_level = hcon->sec_level;

			del_timer(&hcon->smp_timer);
			l2cap_chan_ready(sk);
			smp_link_encrypt_cmplt(conn, status, encrypt);

			bh_unlock_sock(sk);
			continue;
		}

		/* A connect is already pending for this channel; leave it */
		if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
			bh_unlock_sock(sk);
			continue;
		}

		/* Established channels only need their encryption timers
		 * adjusted */
		if (!status && (sk->sk_state == BT_CONNECTED ||
						sk->sk_state == BT_CONFIG)) {
			l2cap_check_encryption(sk, encrypt);
			bh_unlock_sock(sk);
			continue;
		}

		if (sk->sk_state == BT_CONNECT) {
			/* Outgoing channel waiting on security: either move
			 * to AMP physical-link creation or send the L2CAP
			 * connect request; on failure, arm a short timer to
			 * tear the channel down. */
			if (!status) {
				l2cap_pi(sk)->conf_state |=
						L2CAP_CONF_CONNECT_PEND;
				if (l2cap_pi(sk)->amp_pref ==
						BT_AMP_POLICY_PREFER_AMP) {
					amp_create_physical(l2cap_pi(sk)->conn,
							sk);
				} else
					l2cap_send_conn_req(sk);
			} else {
				l2cap_sock_clear_timer(sk);
				l2cap_sock_set_timer(sk, HZ / 10);
			}
		} else if (sk->sk_state == BT_CONNECT2) {
			/* Incoming channel held for security: answer the
			 * peer's connect request now. */
			struct l2cap_conn_rsp rsp;
			__u16 result;

			if (!status) {
				/* AMP-bound channels continue via the AMP
				 * manager instead of responding here */
				if (l2cap_pi(sk)->amp_id) {
					amp_accept_physical(conn,
						l2cap_pi(sk)->amp_id, sk);
					bh_unlock_sock(sk);
					continue;
				}

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
			} else {
				sk->sk_state = BT_DISCONN;
				l2cap_sock_set_timer(sk, HZ / 10);
				result = L2CAP_CR_SEC_BLOCK;
			}

			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
			rsp.result = cpu_to_le16(result);
			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
		}

		bh_unlock_sock(sk);
	}

	read_unlock(&l->lock);

	return 0;
}
7330
/* HCI callback: one ACL data packet arrived for @hcon.  Reassembles
 * fragmented L2CAP frames: an ACL_START packet carries the Basic L2CAP
 * header whose length field tells us the full frame size; continuation
 * packets are appended to conn->rx_skb until conn->rx_len reaches zero,
 * at which point the complete frame is dispatched.  Always returns 0;
 * the input skb is consumed on every path.
 */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
{
	struct l2cap_conn *conn = hcon->l2cap_data;

	if (!conn && hcon->hdev->dev_type != HCI_BREDR)
		goto drop;

	/* First data on a fresh link: create the connection object lazily */
	if (!conn)
		conn = l2cap_conn_add(hcon, 0);

	if (!conn)
		goto drop;

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (flags & ACL_START) {
		struct l2cap_hdr *hdr;
		int len;

		/* A start packet while reassembly is in progress means the
		 * previous frame was truncated: discard it and flag the
		 * connection unreliable. */
		if (conn->rx_len) {
			BT_ERR("Unexpected start frame (len %d)", skb->len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
		}

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		hdr = (struct l2cap_hdr *) skb->data;
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);
			return 0;
		}

		if (flags & ACL_CONT) {
			BT_ERR("Complete frame is incomplete "
					"(len %d, expected len %d)",
					skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			BT_ERR("Frame is too long (len %d, expected len %d)",
				skb->len, len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!conn->rx_skb)
			goto drop;

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len = len - skb->len;
	} else {
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		/* Continuation with no reassembly in progress */
		if (!conn->rx_len) {
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		/* Fragment overruns the announced frame length: abort the
		 * whole reassembly */
		if (skb->len > conn->rx_len) {
			BT_ERR("Fragment is too long (len %d, expected %d)",
				skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			conn->rx_len = 0;
			l2cap_conn_unreliable(conn, ECOMM);
			goto drop;
		}

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
								skb->len);
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
		}
	}

drop:
	kfree_skb(skb);
	return 0;
}
7433
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007434static int l2cap_debugfs_show(struct seq_file *f, void *p)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007435{
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007436 struct sock *sk;
7437 struct hlist_node *node;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007438
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007439 read_lock_bh(&l2cap_sk_list.lock);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007440
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007441 sk_for_each(sk, node, &l2cap_sk_list.head) {
7442 struct l2cap_pinfo *pi = l2cap_pi(sk);
Linus Torvalds1da177e2005-04-16 15:20:36 -07007443
Gustavo F. Padovan903d3432011-02-10 14:16:06 -02007444 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007445 batostr(&bt_sk(sk)->src),
7446 batostr(&bt_sk(sk)->dst),
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007447 sk->sk_state, __le16_to_cpu(pi->psm),
7448 pi->scid, pi->dcid,
7449 pi->imtu, pi->omtu, pi->sec_level,
7450 pi->mode);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007451 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007452
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007453 read_unlock_bh(&l2cap_sk_list.lock);
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007454
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007455 return 0;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007456}
7457
/* debugfs open: bind the single-record seq_file helper to
 * l2cap_debugfs_show */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, l2cap_debugfs_show, inode->i_private);
}
7462
/* File operations for the "l2cap" debugfs entry (seq_file based) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
7469
/* Dentry of the "l2cap" debugfs file; NULL when debugfs is unavailable */
static struct dentry *l2cap_debugfs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07007471
/* Callback table registering L2CAP as a protocol with the HCI core */
static struct hci_proto l2cap_hci_proto = {
	.name		= "L2CAP",
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata,
	.create_cfm	= l2cap_create_cfm,
	.modify_cfm	= l2cap_modify_cfm,
	.destroy_cfm	= l2cap_destroy_cfm,
};
7485
Gustavo F. Padovan64274512011-02-07 20:08:52 -02007486int __init l2cap_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007487{
7488 int err;
Marcel Holtmannbe9d1222005-11-08 09:57:38 -08007489
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007490 err = l2cap_init_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007491 if (err < 0)
7492 return err;
7493
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007494 _l2cap_wq = create_singlethread_workqueue("l2cap");
7495 if (!_l2cap_wq) {
7496 err = -ENOMEM;
7497 goto error;
7498 }
7499
Linus Torvalds1da177e2005-04-16 15:20:36 -07007500 err = hci_register_proto(&l2cap_hci_proto);
7501 if (err < 0) {
7502 BT_ERR("L2CAP protocol registration failed");
7503 bt_sock_unregister(BTPROTO_L2CAP);
7504 goto error;
7505 }
7506
Marcel Holtmannaef7d972010-03-21 05:27:45 +01007507 if (bt_debugfs) {
7508 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
7509 bt_debugfs, NULL, &l2cap_debugfs_fops);
7510 if (!l2cap_debugfs)
7511 BT_ERR("Failed to create L2CAP debug file");
7512 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07007513
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007514 if (amp_init() < 0) {
7515 BT_ERR("AMP Manager initialization failed");
7516 goto error;
7517 }
7518
Linus Torvalds1da177e2005-04-16 15:20:36 -07007519 return 0;
7520
7521error:
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07007522 destroy_workqueue(_l2cap_wq);
Gustavo F. Padovanbb58f742011-02-03 20:50:35 -02007523 l2cap_cleanup_sockets();
Linus Torvalds1da177e2005-04-16 15:20:36 -07007524 return err;
7525}
7526
/* Subsystem teardown: reverse of l2cap_init().  Order matters — pending
 * work is flushed before the workqueue is destroyed, and the HCI proto
 * is unregistered before socket cleanup.
 */
void l2cap_exit(void)
{
	amp_exit();

	debugfs_remove(l2cap_debugfs);

	/* Drain queued L2CAP work before tearing the workqueue down */
	flush_workqueue(_l2cap_wq);
	destroy_workqueue(_l2cap_wq);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
}
7541
/* Module parameters, runtime-tunable via /sys/module/.../parameters */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");

module_param(enable_reconfig, bool, 0644);
MODULE_PARM_DESC(enable_reconfig, "Enable reconfig after initiating AMP move");